path: root/src/camera.cpp
blob: 62acbe26dd7ca6dc0602e81d64986083c2804ea6 (plain)
/*
Minetest
Copyright (C) 2010-2013 celeron55, Perttu Ahola <celeron55@gmail.com>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#include "camera.h"
#include "debug.h"
#include "client.h"
#include "map.h"
#include "clientmap.h"     // MapDrawControl
#include "player.h"
#include <cmath>
#include "client/renderingengine.h"
#include "settings.h"
#include "wieldmesh.h"
#include "noise.h"         // easeCurve
#include "sound.h"
#include "event.h"
#include "nodedef.h"
#include "util/numeric.h"
#include "constants.h"
#include "fontengine.h"
#include "script/scripting_client.h"

#define CAMERA_OFFSET_STEP 200
#define WIELDMESH_OFFSET_X 55.0f
#define WIELDMESH_OFFSET_Y -35.0f
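// CAMERA_OFFSET_STEP is the granularity, in nodes, at which m_camera_offset is
// re-snapped in update(); subtracting that offset keeps scene node coordinates
// small on large maps.  WIELDMESH_OFFSET_X/Y are the default resting position
// of the wielded item node; addArmInertia() sways the mesh around it within a
// clamped window.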

Camera::Camera(MapDrawControl &draw_control, Client *client):
	m_draw_control(draw_control),
	m_client(client)
{
	scene::ISceneManager *smgr = RenderingEngine::get_scene_manager();
	// note: making the camera node a child of the player node
	// would lead to unexpected behaviour, so we don't do that.
	m_playernode = smgr->addEmptySceneNode(smgr->getRootSceneNode());
	m_headnode = smgr->addEmptySceneNode(m_playernode);
	m_cameranode = smgr->addCameraSceneNode(smgr->getRootSceneNode());
	m_cameranode->bindTargetAndRotation(true);

	// This needs to be in its own scene manager. It is drawn after
	// all other 3D scene nodes and before the GUI.
	m_wieldmgr = smgr->createNewSceneManager();
	m_wieldmgr->addCameraSceneNode();
	m_wieldnode = new WieldMeshSceneNode(m_wieldmgr, -1, false);
	m_wieldnode->setItem(ItemStack(), m_client);
	m_wieldnode->drop(); // m_wieldmgr grabbed it

	/* TODO: Add a callback function so these can be updated when a setting
	 *       changes.  At this point in time it doesn't matter (e.g. /set
	 *       is documented to change server settings only)
	 *
	 * TODO: Local caching of settings is not optimal and should at some stage
	 *       be updated to use a global settings object for getting these values
	 *       (as opposed to this local caching). This can be addressed in
	 *       a later release.
	 */
	m_cache_fall_bobbing_amount = g_settings->getFloat("fall_bobbing_amount");
	m_cache_view_bobbing_amount = g_settings->getFloat("view_bobbing_amount");
	m_cache_fov                 = g_settings->getFloat("fov");
	m_cache_zoom_fov            = g_settings->getFloat("zoom_fov");
	m_arm_inertia		    = g_settings->getBool("arm_inertia");
	m_nametags.clear();
}

Camera::~Camera()
{
	m_wieldmgr->drop();
}

bool Camera::successfullyCreated(std::string &error_message)
{
	if (!m_playernode) {
		error_message = "Failed to create the player scene node";
	} else if (!m_headnode) {
		error_message = "Failed to create the head scene node";
	} else if (!m_cameranode) {
		error_message = "Failed to create the camera scene node";
	} else if (!m_wieldmgr) {
		error_message = "Failed to create the wielded item scene manager";
	} else if (!m_wieldnode) {
		error_message = "Failed to create the wielded item scene node";
	} else {
		error_message.clear();
	}

	if (g_settings->getBool("enable_client_modding")) {
		m_client->getScript()->on_camera_ready(this);
	}
	return error_message.empty();
}

// Returns the fractional part of x
inline f32 my_modf(f32 x)
{
	double dummy;
	return modf(x, &dummy);
}
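// e.g. my_modf(1.25f) == 0.25f; used below to wrap the bobbing phase into [0, 1)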

void Camera::step(f32 dtime)
{
	if(m_view_bobbing_fall > 0)
	{
		m_view_bobbing_fall -= 3 * dtime;
		if(m_view_bobbing_fall <= 0)
			m_view_bobbing_fall = -1; // Mark the effect as finished
	}

	bool was_under_zero = m_wield_change_timer < 0;
	m_wield_change_timer = MYMIN(m_wield_change_timer + dtime, 0.125);

	if (m_wield_change_timer >= 0 && was_under_zero)
		m_wieldnode->setItem(m_wield_item_next, m_client);

	if (m_view_bobbing_state != 0)
	{
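		// m_view_bobbing_anim is a phase in [0, 1); a "ViewBobbingStep" event
		// fires whenever the phase crosses 0.0 or 0.5, i.e. twice per cycle.
		// State 1 means the animation is running, state 2 winds the phase back
		// to a rest value (0, 1 or 0.5) before stopping, 0 is idle.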
		//f32 offset = dtime * m_view_bobbing_speed * 0.035;
		f32 offset = dtime * m_view_bobbing_speed * 0.030;
		if (m_view_bobbing_state == 2) {
			// Animation is getting turned off
			if (m_view_bobbing_anim < 0.25) {
				m_view_bobbing_anim -= offset;
			} else if (m_view_bobbing_anim > 0.75) {
				m_view_bobbing_anim += offset;
			}

			if (m_view_bobbing_anim < 0.5) {
				m_view_bobbing_anim += offset;
				if (m_view_bobbing_anim > 0.5)
					m_view_bobbing_anim = 0.5;
			} else {
				m_view_bobbing_anim -= offset;
				if (m_view_bobbing_anim < 0.5)
					m_view_bobbing_anim = 0.5;
			}

			if (m_view_bobbing_anim <= 0 || m_view_bobbing_anim >= 1 ||
					fabs(m_view_bobbing_anim - 0.5) < 0.01) {
				m_view_bobbing_anim = 0;
				m_view_bobbing_state = 0;
			}
		}
		else {
			float was = m_view_bobbing_anim;
			m_view_bobbing_anim = my_modf(m_view_bobbing_anim + offset);
			bool step = (was == 0 ||
					(was < 0.5f && m_view_bobbing_anim >= 0.5f) ||
					(was > 0.5f && m_view_bobbing_anim <= 0.5f));
			if(step) {
				MtEvent *e = new SimpleTriggerEvent("ViewBobbingStep");
				m_client->event()->put(e);
			}
		}
	}

	if (m_digging_button != -1)
	{
		f32 offset = dtime * 3.5;
		float m_digging_anim_was = m_digging_anim;
		m_digging_anim += offset;
		if (m_digging_anim >= 1)
		{
			m_digging_anim = 0;
			m_digging_button = -1;
		}
		float lim = 0.15;
		if(m_digging_anim_was < lim && m_digging_anim >= lim)
		{
			if(m_digging_button == 0)
			{
				MtEvent *e = new SimpleTriggerEvent("CameraPunchLeft");
				m_client->event()->put(e);
			} else if(m_digging_button == 1) {
				MtEvent *e = new SimpleTriggerEvent("CameraPunchRight");
				m_client->event()->put(e);
			}
		}
	}
}

static inline v2f dir(const v2f &pos_dist)
{
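	// Turn the wieldmesh offset, relative to its default position, into a pair
	// of per-axis weights in [0, 1]: the dominant axis becomes 1 and the other
	// its proportional share.  addArmInertia() uses this to weight the per-axis
	// deceleration when the camera stops moving.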
	f32 x = pos_dist.X - WIELDMESH_OFFSET_X;
	f32 y = pos_dist.Y - WIELDMESH_OFFSET_Y;

	f32 x_abs = std::fabs(x);
	f32 y_abs = std::fabs(y);

	if (x_abs >= y_abs) {
		y *= (1.0f / x_abs);
		x /= x_abs;
	}

	if (y_abs >= x_abs) {
		x *= (1.0f / y_abs);
		y /= y_abs;
	}

	return v2f(std::fabs(x), std::fabs(y));
}

void Camera::addArmInertia(f32 player_yaw)
{
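	// The camera's angular velocity is estimated from the change in player yaw
	// (X) and in the Y component of the view direction since the last frame,
	// assuming a nominal frame time of 0.016 s (~60 FPS).  While the camera
	// moves fast enough, the wield mesh is swung around its default offset
	// inside a clamped window; once it slows down, it decelerates back towards
	// (WIELDMESH_OFFSET_X, WIELDMESH_OFFSET_Y).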
	m_cam_vel.X = std::fabs(rangelim(m_last_cam_pos.X - player_yaw,
		-100.0f, 100.0f) / 0.016f) * 0.01f;
	m_cam_vel.Y = std::fabs((m_last_cam_pos.Y - m_camera_direction.Y) / 0.016f);
	f32 gap_X = std::fabs(WIELDMESH_OFFSET_X - m_wieldmesh_offset.X);
	f32 gap_Y = std::fabs(WIELDMESH_OFFSET_Y - m_wieldmesh_offset.Y);

	if (m_cam_vel.X > 1.0f || m_cam_vel.Y > 1.0f) {
		/*
		    The arm moves relative to the camera speed,
		    with an acceleration factor.
		*/

		if (m_cam_vel.X > 1.0f) {
			if (m_cam_vel.X > m_cam_vel_old.X)
				m_cam_vel_old.X = m_cam_vel.X;

			f32 acc_X = 0.12f * (m_cam_vel.X - (gap_X * 0.1f));
			m_wieldmesh_offset.X += m_last_cam_pos.X < player_yaw ? acc_X : -acc_X;

			if (m_last_cam_pos.X != player_yaw)
				m_last_cam_pos.X = player_yaw;

			m_wieldmesh_offset.X = rangelim(m_wieldmesh_offset.X,
				WIELDMESH_OFFSET_X - 7.0f, WIELDMESH_OFFSET_X + 7.0f);
		}

		if (m_cam_vel.Y > 1.0f) {
			if (m_cam_vel.Y > m_cam_vel_old.Y)
				m_cam_vel_old.Y = m_cam_vel.Y;

			f32 acc_Y = 0.12f * (m_cam_vel.Y - (gap_Y * 0.1f));
			m_wieldmesh_offset.Y +=
				m_last_cam_pos.Y > m_camera_direction.Y ? acc_Y : -acc_Y;

			if (m_last_cam_pos.Y != m_camera_direction.Y)
				m_last_cam_pos.Y = m_camera_direction.Y;

			m_wieldmesh_offset.Y = rangelim(m_wieldmesh_offset.Y,
				WIELDMESH_OFFSET_Y - 10.0f, WIELDMESH_OFFSET_Y + 5.0f);
		}

		m_arm_dir = dir(m_wieldmesh_offset);
	} else {
		/*
		    When the camera stops, the arm moves back to its default position
		    along its direction vector, with a smooth deceleration factor.
		*/

		f32 dec_X = 0.12f * (m_cam_vel_old.X * (1.0f +
			(1.0f - m_arm_dir.X))) * (gap_X / 20.0f);

		f32 dec_Y = 0.06f * (m_cam_vel_old.Y * (1.0f +
			(1.0f - m_arm_dir.Y))) * (gap_Y / 15.0f);

		if (gap_X < 0.1f)
			m_cam_vel_old.X = 0.0f;

		m_wieldmesh_offset.X -=
			m_wieldmesh_offset.X > WIELDMESH_OFFSET_X ? dec_X : -dec_X;

		if (gap_Y < 0.1f)
			m_cam_vel_old.Y = 0.0f;

		m_wieldmesh_offset.Y -=
			m_wieldmesh_offset.Y > WIELDMESH_OFFSET_Y ? dec_Y : -dec_Y;
	}
}

void Camera::update(LocalPlayer* player, f32 frametime, f32 busytime, f32 tool_reload_ratio)
{
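	// Per-frame camera update: smooth the player position on stairs, apply
	// fall and view bobbing, place the player/head/camera scene nodes, pull
	// the camera back (with a collision check) for the third-person modes,
	// maintain the camera offset used on large maps, recompute FOV and aspect
	// ratio, and position the wielded item.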
	// Get player position
	// Smooth the movement when walking up stairs
	v3f old_player_position = m_playernode->getPosition();
	v3f player_position = player->getPosition();
	if (player->isAttached && player->parent)
		player_position = player->parent->getPosition();
	//if(player->touching_ground && player_position.Y > old_player_position.Y)
	if(player->touching_ground &&
			player_position.Y > old_player_position.Y)
	{
		f32 oldy = old_player_position.Y;
		f32 newy = player_position.Y;
		f32 t = exp(-23*frametime);
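		// The exponential factor makes the smoothing frame-rate independent:
		// at 60 FPS (frametime ~0.016 s) t = exp(-23 * 0.016) ~= 0.69, so about
		// 30% of the remaining height difference is closed each frame.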
		player_position.Y = oldy * t + newy * (1-t);
	}

	// Set player node transformation
	m_playernode->setPosition(player_position);
	m_playernode->setRotation(v3f(0, -1 * player->getYaw(), 0));
	m_playernode->updateAbsolutePosition();

	// Get camera tilt timer (hurt animation)
	float cameratilt = fabs(fabs(player->hurt_tilt_timer-0.75)-0.75);

	// Fall bobbing animation
	float fall_bobbing = 0;
	if(player->camera_impact >= 1 && m_camera_mode < CAMERA_MODE_THIRD)
	{
		if(m_view_bobbing_fall == -1) // Effect took place and has finished
			player->camera_impact = m_view_bobbing_fall = 0;
		else if(m_view_bobbing_fall == 0) // Initialize effect
			m_view_bobbing_fall = 1;

		// Convert 0 -> 1 to 0 -> 1 -> 0
		fall_bobbing = m_view_bobbing_fall < 0.5 ? m_view_bobbing_fall * 2 : -(m_view_bobbing_fall - 0.5) * 2 + 1;
		// Smoothen and invert the above
		fall_bobbing = sin(fall_bobbing * 0.5 * M_PI) * -1;
		// Amplify according to the intensity of the impact
		fall_bobbing *= (1 - rangelim(50 / player->camera_impact, 0, 1)) * 5;
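		// With this clamp, impacts where camera_impact <= 50 give no bobbing at
		// all; stronger impacts scale the amplitude towards a factor of 5.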

		fall_bobbing *= m_cache_fall_bobbing_amount;
	}

	// Calculate the player's eye offset for the different camera modes
	v3f PlayerEyeOffset = player->getEyeOffset();
	if (m_camera_mode == CAMERA_MODE_FIRST)
		PlayerEyeOffset += player->eye_offset_first;
	else
		PlayerEyeOffset += player->eye_offset_third;

	// Set head node transformation
	m_headnode->setPosition(PlayerEyeOffset+v3f(0,cameratilt*-player->hurt_tilt_strength+fall_bobbing,0));
	m_headnode->setRotation(v3f(player->getPitch(), 0, cameratilt*player->hurt_tilt_strength));
	m_headnode->updateAbsolutePosition();

	// Compute relative camera position and target
	v3f rel_cam_pos = v3f(0,0,0);
	v3f rel_cam_target = v3f(0,0,1);
	v3f rel_cam_up = v3f(0,1,0);

	if (m_cache_view_bobbing_amount != 0.0f && m_view_bobbing_anim != 0.0f &&
		m_camera_mode < CAMERA_MODE_THIRD) {
		f32 bobfrac = my_modf(m_view_bobbing_anim * 2);
		f32 bobdir = (m_view_bobbing_anim < 0.5) ? 1.0 : -1.0;
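		// The phase is doubled so one animation cycle yields two steps; bobdir
		// flips the lateral sway between the two half-cycles, while the vertical
		// term below (-0.28 * bobtmp^2) dips once per half-cycle.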

		#if 1
		f32 bobknob = 1.2;
		f32 bobtmp = sin(pow(bobfrac, bobknob) * M_PI);
		//f32 bobtmp2 = cos(pow(bobfrac, bobknob) * M_PI);

		v3f bobvec = v3f(
			0.3 * bobdir * sin(bobfrac * M_PI),
			-0.28 * bobtmp * bobtmp,
			0.);

		//rel_cam_pos += 0.2 * bobvec;
		//rel_cam_target += 0.03 * bobvec;
		//rel_cam_up.rotateXYBy(0.02 * bobdir * bobtmp * M_PI);
		float f = 1.0;
		f *= m_cache_view_bobbing_amount;
		rel_cam_pos += bobvec * f;
		//rel_cam_target += 0.995 * bobvec * f;
		rel_cam_target += bobvec * f;
		rel_cam_target.Z -= 0.005 * bobvec.Z * f;
		//rel_cam_target.X -= 0.005 * bobvec.X * f;
		//rel_cam_target.Y -= 0.005 * bobvec.Y * f;
		rel_cam_up.rotateXYBy(-0.03 * bobdir * bobtmp * M_PI * f);
		#else
		f32 angle_deg = 1 * bobdir * sin(bobfrac * M_PI);
		f32 angle_rad = angle_deg * M_PI / 180;
		f32 r = 0.05;
		v3f off = v3f(
			r * sin(angle_rad),
			r * (cos(angle_rad) - 1),
			0);
		rel_cam_pos += off;
		//rel_cam_target += off;
		rel_cam_up.rotateXYBy(angle_deg);
		#endif

	}

	// Compute absolute camera position and target
	m_headnode->getAbsoluteTransformation().transformVect(m_camera_position, rel_cam_pos);
	m_headnode->getAbsoluteTransformation().rotateVect(m_camera_direction, rel_cam_target - rel_cam_pos);

	v3f abs_cam_up;
	m_headnode->getAbsoluteTransformation().rotateVect(abs_cam_up, rel_cam_up);

	// Separate camera position for calculation
	v3f my_cp = m_camera_position;

	// Reposition the camera for third person view
	if (m_camera_mode > CAMERA_MODE_FIRST)
	{
		if (m_camera_mode == CAMERA_MODE_THIRD_FRONT)
			m_camera_direction *= -1;

		my_cp.Y += 2;

		// Calculate new position
		bool abort = false;
		for (int i = BS; i <= BS * 2.75; i++) {
			my_cp.X = m_camera_position.X + m_camera_direction.X * -i;
			my_cp.Z = m_camera_position.Z + m_camera_direction.Z * -i;
			if (i > 12)
				my_cp.Y = m_camera_position.Y + (m_camera_direction.Y * -i);

			// Prevent the camera from being positioned inside nodes
			INodeDefManager *nodemgr = m_client->ndef();
			MapNode n = m_client->getEnv().getClientMap()
				.getNodeNoEx(floatToInt(my_cp, BS));

			const ContentFeatures& features = nodemgr->get(n);
			if (features.walkable) {
				my_cp.X += m_camera_direction.X*-1*-BS/2;
				my_cp.Z += m_camera_direction.Z*-1*-BS/2;
				my_cp.Y += m_camera_direction.Y*-1*-BS/2;
				abort = true;
				break;
			}
		}

		// If a node blocks the camera position, don't move it up too high
		if (abort && my_cp.Y > player_position.Y+BS*2)
			my_cp.Y = player_position.Y+BS*2;
	}

	// Update offset if too far away from the center of the map
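	// Because of the integer division the offset only moves in whole
	// CAMERA_OFFSET_STEP (200 node) increments: e.g. camera at node X = 401
	// with an offset of 200 gives a difference of 201, 201 / 200 == 1, so the
	// offset jumps to 400; a difference of 199 leaves it unchanged.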
	m_camera_offset.X += CAMERA_OFFSET_STEP*
			(((s16)(my_cp.X/BS) - m_camera_offset.X)/CAMERA_OFFSET_STEP);
	m_camera_offset.Y += CAMERA_OFFSET_STEP*
			(((s16)(my_cp.Y/BS) - m_camera_offset.Y)/CAMERA_OFFSET_STEP);
	m_camera_offset.Z += CAMERA_OFFSET_STEP*
			(((s16)(my_cp.Z/BS) - m_camera_offset.Z)/CAMERA_OFFSET_STEP);

	// Set camera node transformation
	m_cameranode->setPosition(my_cp-intToFloat(m_camera_offset, BS));
	m_cameranode->setUpVector(abs_cam_up);
	// *100.0 helps in large map coordinates
	m_cameranode->setTarget(my_cp-intToFloat(m_camera_offset, BS) + 100 * m_camera_direction);

	// Update the camera position in front-view mode to render blocks behind the player
	if (m_camera_mode == CAMERA_MODE_THIRD_FRONT)
		m_camera_position = my_cp;

	// Get FOV
	f32 fov_degrees;
	if (player->getPlayerControl().zoom && player->getCanZoom()) {
		fov_degrees = m_cache_zoom_fov;
	} else {
		fov_degrees = m_cache_fov;
	}
	fov_degrees = rangelim(fov_degrees, 1.0, 160.0);

	// FOV and aspect ratio
	const v2u32 &window_size = RenderingEngine::get_instance()->getWindowSize();
	m_aspect = (f32) window_size.X / (f32) window_size.Y;
	m_fov_y = fov_degrees * M_PI / 180.0;
	// Increase vertical FOV on lower aspect ratios (<16:10)
	m_fov_y *= MYMAX(1.0, MYMIN(1.4, sqrt(16./10. / m_aspect)));
	m_fov_x = 2 * atan(m_aspect * tan(0.5 * m_fov_y));
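	// e.g. a 72 degree vertical FOV at 16:9 (where the low-aspect boost above
	// stays at 1.0) gives a horizontal FOV of roughly 104.5 degrees.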
	m_cameranode->setAspectRatio(m_aspect);
	m_cameranode->setFOV(m_fov_y);

	if (m_arm_inertia)
		addArmInertia(player->getYaw());

	// Position the wielded item
	//v3f wield_position = v3f(45, -35, 65);
	v3f wield_position = v3f(m_wieldmesh_offset.X, m_wieldmesh_offset.Y, 65);
	//v3f wield_rotation = v3f(-100, 120, -100);
	v3f wield_rotation = v3f(-100, 120, -100);
	wield_position.Y += fabs(m_wield_change_timer)*320 - 40;
	if(m_digging_anim < 0.05 || m_digging_anim > 0.5)
	{
		f32 frac = 1.0;
		if(m_digging_anim > 0.5)
			frac = 2.0 * (m_digging_anim - 0.5);
		// This value starts from 1 and settles to 0
		f32 ratiothing = pow((1.0f - tool_reload_ratio), 0.5f);
		//f32 ratiothing2 = pow(ratiothing, 0.5f);
		f32 ratiothing2 = (easeCurve(ratiothing*0.5))*2.0;
		wield_position.Y -= frac * 25.0 * pow(ratiothing2, 1.7f);
		//wield_position.Z += frac * 5.0 * ratiothing2;
		wield_position.X -= frac * 35.0 * pow(ratiothing2, 1.1f);
		wield_rotation.Y += frac * 70.0 * pow(ratiothing2, 1.4f);
		//wield_rotation.X -= frac * 15.0 * pow(ratiothing2, 1.4f);
		//wield_rotation.Z += frac * 15.0 * pow(ratiothing2, 1.0f);
	}
	if (m_digging_button != -1)
	{
		f32 digfrac = m_digging_anim;
		wield_position.X -= 50 * sin(pow(digfrac, 0.8f) * M_PI);
		wield_position.Y += 24 * sin(digfrac * 1.8 * M_PI);
		wield_position.Z += 25 * 0.5;

		// Euler angles are PURE EVIL, so why not use quaternions?
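		// The slerp weight sin(digfrac * M_PI) runs 0 -> 1 -> 0 over the dig,
		// so the item swings towards (80, 30, 100) degrees and back again.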
		core::quaternion quat_begin(wield_rotation * core::DEGTORAD);
		core::quaternion quat_end(v3f(80, 30, 100) * core::DEGTORAD);
		core::quaternion quat_slerp;
		quat_slerp.slerp(quat_begin, quat_end, sin(digfrac * M_PI));
		quat_slerp.toEuler(wield_rotation);
		wield_rotation *= core::RADTODEG;
	} else {
		f32 bobfrac = my_modf(m_view_bobbing_anim);
		wield_position.X -= sin(bobfrac*M_PI*2.0) * 3.0;
		wield_position.Y += sin(my_modf(bobfrac*2.0)*M_PI) * 3.0;
	}
	m_wieldnode->setPosition(wield_position);
	m_wieldnode->setRotation(wield_rotation);

	m_wieldnode->setColor(player->light_color);

	// Set render distance
	updateViewingRange();

	// If the player is walking, swimming, or climbing and is not
	// flying (free_move enabled together with the "fly" privilege),
	// start (or continue) the view bobbing animation.
	const v3f &speed = player->getSpeed();
	const bool movement_XZ = hypot(speed.X, speed.Z) > BS;
	const bool movement_Y = fabs(speed.Y) > BS;

	const bool walking = movement_XZ && player->touching_ground;
	const bool swimming = (movement_XZ || player->swimming_vertical) && player->in_liquid;
	const bool climbing = movement_Y && player->is_climbing;
	if ((walking || swimming || climbing) &&
			(!g_settings->getBool("free_move") || !m_client->checkLocalPrivilege("fly"))) {
		// Start animation
		m_view_bobbing_state = 1;
		m_view_bobbing_speed = MYMIN(speed.getLength(), 70);
	}
	else if (m_view_bobbing_state == 1)
	{
		// Stop animation
		m_view_bobbing_state = 2;
		m_view_bobbing_speed = 60;
	}
}

void Camera::updateViewingRange()
{
	f32 viewing_range = g_settings->getFloat("viewing_range");
	f32 near_plane = g_settings->getFloat("near_plane");

	m_draw_control.wanted_range = std::fmin(adjustDist(viewing_range, getFovMax()), 4000);
	m_cameranode->setNearValue(rangelim(near_plane, 0.0f, 0.5f) * BS);
	if (m_draw_control.range_all) {
		m_cameranode->setFarValue(100000.0);
		return;
	}
	m_cameranode->setFarValue((viewing_range < 2000) ? 2000 * BS : viewing_range * BS);
}

void Camera::setDigging(s32 button)
{
	if (m_digging_button == -1)
		m_digging_button = button;
}

void Camera::wield(const ItemStack &item)
{
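	// Switching the wielded item is not instant: the change timer is made
	// negative here, step() swaps the mesh once the timer crosses zero, and
	// update() uses fabs(m_wield_change_timer) to dip the item down and bring
	// it back up while the swap happens.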
	if (item.name != m_wield_item_next.name ||
			item.metadata != m_wield_item_next.metadata) {
		m_wield_item_next = item;
		if (m_wield_change_timer > 0)
			m_wield_change_timer = -m_wield_change_timer;
		else if (m_wield_change_timer == 0)
			m_wield_change_timer = -0.001;
	}
}

void Camera::drawWieldedTool(irr::core::matrix4* translation)
{
	// Clear the Z buffer so that the wielded tool stays in front of world geometry
	m_wieldmgr->getVideoDriver()->clearZBuffer();

	// Draw the wielded node (in a separate scene manager)
	scene::ICameraSceneNode* cam = m_wieldmgr->getActiveCamera();
	cam->setAspectRatio(m_cameranode->getAspectRatio());
	cam->setFOV(72.0*M_PI/180.0);
	cam->setNearValue(10);
	cam->setFarValue(1000);
	if (translation != NULL)
	{
		irr::core::matrix4 startMatrix = cam->getAbsoluteTransformation();
		irr::core::vector3df focusPoint = (cam->getTarget()
				- cam->getAbsolutePosition()).setLength(1)
				+ cam->getAbsolutePosition();

		irr::core::vector3df camera_pos =
				(startMatrix * *translation).getTranslation();
		cam->setPosition(camera_pos);
		cam->setTarget(focusPoint);
	}
	m_wieldmgr->drawAll();
}

void Camera::drawNametags()
{
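	// Nametags are projected manually: each position is transformed by
	// projection * view, positions with w <= 0 (behind the camera) are skipped,
	// and the perspective divide maps the rest from clip space to pixel
	// coordinates, centred on the rendered text size.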
	core::matrix4 trans = m_cameranode->getProjectionMatrix();
	trans *= m_cameranode->getViewMatrix();

	for (std::list<Nametag *>::const_iterator
			i = m_nametags.begin();
			i != m_nametags.end(); ++i) {
		Nametag *nametag = *i;
		if (nametag->nametag_color.getAlpha() == 0) {
			// Enforce hiding nametag,
			// because if freetype is enabled, a grey
			// shadow can remain.
			continue;
		}
		v3f pos = nametag->parent_node->getAbsolutePosition() + nametag->nametag_pos * BS;
		f32 transformed_pos[4] = { pos.X, pos.Y, pos.Z, 1.0f };
		trans.multiplyWith1x4Matrix(transformed_pos);
		if (transformed_pos[3] > 0) {
			std::wstring nametag_colorless =
				unescape_translate(utf8_to_wide(nametag->nametag_text));
			core::dimension2d<u32> textsize =
				g_fontengine->getFont()->getDimension(
				nametag_colorless.c_str());
			f32 zDiv = transformed_pos[3] == 0.0f ? 1.0f :
				core::reciprocal(transformed_pos[3]);
			v2u32 screensize = RenderingEngine::get_video_driver()->getScreenSize();
			v2s32 screen_pos;
			screen_pos.X = screensize.X *
				(0.5 * transformed_pos[0] * zDiv + 0.5) - textsize.Width / 2;
			screen_pos.Y = screensize.Y *
				(0.5 - transformed_pos[1] * zDiv * 0.5) - textsize.Height / 2;
			core::rect<s32> size(0, 0, textsize.Width, textsize.Height);
			g_fontengine->getFont()->draw(
				translate_string(utf8_to_wide(nametag->nametag_text)).c_str(),
				size + screen_pos, nametag->nametag_color);
		}
	}
}

Nametag *Camera::addNametag(scene::ISceneNode *parent_node,
		const std::string &nametag_text, video::SColor nametag_color,
		const v3f &pos)
{
	Nametag *nametag = new Nametag(parent_node, nametag_text, nametag_color, pos);
	m_nametags.push_back(nametag);
	return nametag;
}

void Camera::removeNametag(Nametag *nametag)
{
	m_nametags.remove(nametag);
	delete nametag;
}
class="hl com"> appends "^crack0" to it and gets a new texture id with getTextureId("stone.png^mineral_coal.png^crack0"). */ /* Gets a texture id from cache or - if main thread, generates the texture, adds to cache and returns id. - if other thread, adds to request queue and waits for main thread. The id 0 points to a NULL texture. It is returned in case of error. */ u32 getTextureId(const std::string &name); // Finds out the name of a cached texture. std::string getTextureName(u32 id); /* If texture specified by the name pointed by the id doesn't exist, create it, then return the cached texture. Can be called from any thread. If called from some other thread and not found in cache, the call is queued to the main thread for processing. */ video::ITexture* getTexture(u32 id); video::ITexture* getTexture(const std::string &name, u32 *id = NULL); /* Get a texture specifically intended for mesh application, i.e. not HUD, compositing, or other 2D use. This texture may be a different size and may have had additional filters applied. */ video::ITexture* getTextureForMesh(const std::string &name, u32 *id); virtual Palette* getPalette(const std::string &name); bool isKnownSourceImage(const std::string &name) { bool is_known = false; bool cache_found = m_source_image_existence.get(name, &is_known); if (cache_found) return is_known; // Not found in cache; find out if a local file exists is_known = (!getTexturePath(name).empty()); m_source_image_existence.set(name, is_known); return is_known; } // Processes queued texture requests from other threads. // Shall be called from the main thread. void processQueue(); // Insert an image into the cache without touching the filesystem. // Shall be called from the main thread. void insertSourceImage(const std::string &name, video::IImage *img); // Rebuild images and textures from the current set of source images // Shall be called from the main thread. void rebuildImagesAndTextures(); video::ITexture* getNormalTexture(const std::string &name); video::SColor getTextureAverageColor(const std::string &name); video::ITexture *getShaderFlagsTexture(bool normamap_present); private: // The id of the thread that is allowed to use irrlicht directly std::thread::id m_main_thread; // Cache of source images // This should be only accessed from the main thread SourceImageCache m_sourcecache; // Generate a texture u32 generateTexture(const std::string &name); // Generate image based on a string like "stone.png" or "[crack:1:0". // if baseimg is NULL, it is created. Otherwise stuff is made on it. bool generateImagePart(std::string part_of_name, video::IImage *& baseimg); /*! Generates an image from a full string like * "stone.png^mineral_coal.png^[crack:1:0". * Shall be called from the main thread. * The returned Image should be dropped. */ video::IImage* generateImage(const std::string &name); // Thread-safe cache of what source images are known (true = known) MutexedMap<std::string, bool> m_source_image_existence; // A texture id is index in this array. // The first position contains a NULL texture. std::vector<TextureInfo> m_textureinfo_cache; // Maps a texture name to an index in the former. 
std::map<std::string, u32> m_name_to_id; // The two former containers are behind this mutex std::mutex m_textureinfo_cache_mutex; // Queued texture fetches (to be processed by the main thread) RequestQueue<std::string, u32, u8, u8> m_get_texture_queue; // Textures that have been overwritten with other ones // but can't be deleted because the ITexture* might still be used std::vector<video::ITexture*> m_texture_trash; // Maps image file names to loaded palettes. std::unordered_map<std::string, Palette> m_palettes; // Cached settings needed for making textures from meshes bool m_setting_trilinear_filter; bool m_setting_bilinear_filter; bool m_setting_anisotropic_filter; }; IWritableTextureSource *createTextureSource() { return new TextureSource(); } TextureSource::TextureSource() { m_main_thread = std::this_thread::get_id(); // Add a NULL TextureInfo as the first index, named "" m_textureinfo_cache.emplace_back(""); m_name_to_id[""] = 0; // Cache some settings // Note: Since this is only done once, the game must be restarted // for these settings to take effect m_setting_trilinear_filter = g_settings->getBool("trilinear_filter"); m_setting_bilinear_filter = g_settings->getBool("bilinear_filter"); m_setting_anisotropic_filter = g_settings->getBool("anisotropic_filter"); } TextureSource::~TextureSource() { video::IVideoDriver *driver = RenderingEngine::get_video_driver(); unsigned int textures_before = driver->getTextureCount(); for (const auto &iter : m_textureinfo_cache) { //cleanup texture if (iter.texture) driver->removeTexture(iter.texture); } m_textureinfo_cache.clear(); for (auto t : m_texture_trash) { //cleanup trashed texture driver->removeTexture(t); } infostream << "~TextureSource() "<< textures_before << "/" << driver->getTextureCount() << std::endl; } u32 TextureSource::getTextureId(const std::string &name) { //infostream<<"getTextureId(): \""<<name<<"\""<<std::endl; { /* See if texture already exists */ MutexAutoLock lock(m_textureinfo_cache_mutex); std::map<std::string, u32>::iterator n; n = m_name_to_id.find(name); if (n != m_name_to_id.end()) { return n->second; } } /* Get texture */ if (std::this_thread::get_id() == m_main_thread) { return generateTexture(name); } infostream<<"getTextureId(): Queued: name=\""<<name<<"\""<<std::endl; // We're gonna ask the result to be put into here static ResultQueue<std::string, u32, u8, u8> result_queue; // Throw a request in m_get_texture_queue.add(name, 0, 0, &result_queue); try { while(true) { // Wait result for a second GetResult<std::string, u32, u8, u8> result = result_queue.pop_front(1000); if (result.key == name) { return result.item; } } } catch(ItemNotFoundException &e) { errorstream << "Waiting for texture " << name << " timed out." << std::endl; return 0; } infostream << "getTextureId(): Failed" << std::endl; return 0; } // Draw an image on top of an another one, using the alpha channel of the // source image static void blit_with_alpha(video::IImage *src, video::IImage *dst, v2s32 src_pos, v2s32 dst_pos, v2u32 size); // Like blit_with_alpha, but only modifies destination pixels that // are fully opaque static void blit_with_alpha_overlay(video::IImage *src, video::IImage *dst, v2s32 src_pos, v2s32 dst_pos, v2u32 size); // Apply a color to an image. Uses an int (0-255) to calculate the ratio. // If the ratio is 255 or -1 and keep_alpha is true, then it multiples the // color alpha with the destination alpha. // Otherwise, any pixels that are not fully transparent get the color alpha. 
static void apply_colorize(video::IImage *dst, v2u32 dst_pos, v2u32 size, const video::SColor &color, int ratio, bool keep_alpha); // paint a texture using the given color static void apply_multiplication(video::IImage *dst, v2u32 dst_pos, v2u32 size, const video::SColor &color); // Apply a mask to an image static void apply_mask(video::IImage *mask, video::IImage *dst, v2s32 mask_pos, v2s32 dst_pos, v2u32 size); // Draw or overlay a crack static void draw_crack(video::IImage *crack, video::IImage *dst, bool use_overlay, s32 frame_count, s32 progression, video::IVideoDriver *driver, u8 tiles = 1); // Brighten image void brighten(video::IImage *image); // Parse a transform name u32 parseImageTransform(const std::string& s); // Apply transform to image dimension core::dimension2d<u32> imageTransformDimension(u32 transform, core::dimension2d<u32> dim); // Apply transform to image data void imageTransform(u32 transform, video::IImage *src, video::IImage *dst); /* This method generates all the textures */ u32 TextureSource::generateTexture(const std::string &name) { //infostream << "generateTexture(): name=\"" << name << "\"" << std::endl; // Empty name means texture 0 if (name.empty()) { infostream<<"generateTexture(): name is empty"<<std::endl; return 0; } { /* See if texture already exists */ MutexAutoLock lock(m_textureinfo_cache_mutex); std::map<std::string, u32>::iterator n; n = m_name_to_id.find(name); if (n != m_name_to_id.end()) { return n->second; } } /* Calling only allowed from main thread */ if (std::this_thread::get_id() != m_main_thread) { errorstream<<"TextureSource::generateTexture() " "called not from main thread"<<std::endl; return 0; } video::IVideoDriver *driver = RenderingEngine::get_video_driver(); sanity_check(driver); video::IImage *img = generateImage(name); video::ITexture *tex = NULL; if (img != NULL) { #if ENABLE_GLES img = Align2Npot2(img, driver); #endif // Create texture from resulting image tex = driver->addTexture(name.c_str(), img); guiScalingCache(io::path(name.c_str()), driver, img); img->drop(); } /* Add texture to caches (add NULL textures too) */ MutexAutoLock lock(m_textureinfo_cache_mutex); u32 id = m_textureinfo_cache.size(); TextureInfo ti(name, tex); m_textureinfo_cache.push_back(ti); m_name_to_id[name] = id; return id; } std::string TextureSource::getTextureName(u32 id) { MutexAutoLock lock(m_textureinfo_cache_mutex); if (id >= m_textureinfo_cache.size()) { errorstream<<"TextureSource::getTextureName(): id="<<id <<" >= m_textureinfo_cache.size()=" <<m_textureinfo_cache.size()<<std::endl; return ""; } return m_textureinfo_cache[id].name; } video::ITexture* TextureSource::getTexture(u32 id) { MutexAutoLock lock(m_textureinfo_cache_mutex); if (id >= m_textureinfo_cache.size()) return NULL; return m_textureinfo_cache[id].texture; } video::ITexture* TextureSource::getTexture(const std::string &name, u32 *id) { u32 actual_id = getTextureId(name); if (id){ *id = actual_id; } return getTexture(actual_id); } video::ITexture* TextureSource::getTextureForMesh(const std::string &name, u32 *id) { return getTexture(name + "^[applyfiltersformesh", id); } Palette* TextureSource::getPalette(const std::string &name) { // Only the main thread may load images sanity_check(std::this_thread::get_id() == m_main_thread); if (name.empty()) return NULL; auto it = m_palettes.find(name); if (it == m_palettes.end()) { // Create palette video::IImage *img = generateImage(name); if (!img) { warningstream << "TextureSource::getPalette(): palette \"" << name << "\" could not be 
loaded." << std::endl; return NULL; } Palette new_palette; u32 w = img->getDimension().Width; u32 h = img->getDimension().Height; // Real area of the image u32 area = h * w; if (area == 0) return NULL; if (area > 256) { warningstream << "TextureSource::getPalette(): the specified" << " palette image \"" << name << "\" is larger than 256" << " pixels, using the first 256." << std::endl; area = 256; } else if (256 % area != 0) warningstream << "TextureSource::getPalette(): the " << "specified palette image \"" << name << "\" does not " << "contain power of two pixels." << std::endl; // We stretch the palette so it will fit 256 values // This many param2 values will have the same color u32 step = 256 / area; // For each pixel in the image for (u32 i = 0; i < area; i++) { video::SColor c = img->getPixel(i % w, i / w); // Fill in palette with 'step' colors for (u32 j = 0; j < step; j++) new_palette.push_back(c); } img->drop(); // Fill in remaining elements while (new_palette.size() < 256) new_palette.emplace_back(0xFFFFFFFF); m_palettes[name] = new_palette; it = m_palettes.find(name); } if (it != m_palettes.end()) return &((*it).second); return NULL; } void TextureSource::processQueue() { /* Fetch textures */ //NOTE this is only thread safe for ONE consumer thread! if (!m_get_texture_queue.empty()) { GetRequest<std::string, u32, u8, u8> request = m_get_texture_queue.pop(); /*infostream<<"TextureSource::processQueue(): " <<"got texture request with " <<"name=\""<<request.key<<"\"" <<std::endl;*/ m_get_texture_queue.pushResult(request, generateTexture(request.key)); } } void TextureSource::insertSourceImage(const std::string &name, video::IImage *img) { //infostream<<"TextureSource::insertSourceImage(): name="<<name<<std::endl; sanity_check(std::this_thread::get_id() == m_main_thread); m_sourcecache.insert(name, img, true); m_source_image_existence.set(name, true); } void TextureSource::rebuildImagesAndTextures() { MutexAutoLock lock(m_textureinfo_cache_mutex); video::IVideoDriver *driver = RenderingEngine::get_video_driver(); sanity_check(driver); // Recreate textures for (TextureInfo &ti : m_textureinfo_cache) { video::IImage *img = generateImage(ti.name); #if ENABLE_GLES img = Align2Npot2(img, driver); #endif // Create texture from resulting image video::ITexture *t = NULL; if (img) { t = driver->addTexture(ti.name.c_str(), img); guiScalingCache(io::path(ti.name.c_str()), driver, img); img->drop(); } video::ITexture *t_old = ti.texture; // Replace texture ti.texture = t; if (t_old) m_texture_trash.push_back(t_old); } } inline static void applyShadeFactor(video::SColor &color, u32 factor) { u32 f = core::clamp<u32>(factor, 0, 256); color.setRed(color.getRed() * f / 256); color.setGreen(color.getGreen() * f / 256); color.setBlue(color.getBlue() * f / 256); } static video::IImage *createInventoryCubeImage( video::IImage *top, video::IImage *left, video::IImage *right) { core::dimension2du size_top = top->getDimension(); core::dimension2du size_left = left->getDimension(); core::dimension2du size_right = right->getDimension(); u32 size = npot2(std::max({ size_top.Width, size_top.Height, size_left.Width, size_left.Height, size_right.Width, size_right.Height, })); // It must be divisible by 4, to let everything work correctly. // But it is a power of 2, so being at least 4 is the same. // And the resulting texture should't be too large as well. size = core::clamp<u32>(size, 4, 64); // With such parameters, the cube fits exactly, touching each image line // from `0` to `cube_size - 1`. 
(Note that division is exact here). u32 cube_size = 9 * size; u32 offset = size / 2; video::IVideoDriver *driver = RenderingEngine::get_video_driver(); auto lock_image = [size, driver] (video::IImage *&image) -> const u32 * { image->grab(); core::dimension2du dim = image->getDimension(); video::ECOLOR_FORMAT format = image->getColorFormat(); if (dim.Width != size || dim.Height != size || format != video::ECF_A8R8G8B8) { video::IImage *scaled = driver->createImage(video::ECF_A8R8G8B8, {size, size}); image->copyToScaling(scaled); image->drop(); image = scaled; } sanity_check(image->getPitch() == 4 * size); return reinterpret_cast<u32 *>(image->lock()); }; auto free_image = [] (video::IImage *image) -> void { image->unlock(); image->drop(); }; video::IImage *result = driver->createImage(video::ECF_A8R8G8B8, {cube_size, cube_size}); sanity_check(result->getPitch() == 4 * cube_size); result->fill(video::SColor(0x00000000u)); u32 *target = reinterpret_cast<u32 *>(result->lock()); // Draws single cube face // `shade_factor` is face brightness, in range [0.0, 1.0] // (xu, xv, x1; yu, yv, y1) form coordinate transformation matrix // `offsets` list pixels to be drawn for single source pixel auto draw_image = [=] (video::IImage *image, float shade_factor, s16 xu, s16 xv, s16 x1, s16 yu, s16 yv, s16 y1, std::initializer_list<v2s16> offsets) -> void { u32 brightness = core::clamp<u32>(256 * shade_factor, 0, 256); const u32 *source = lock_image(image); for (u16 v = 0; v < size; v++) { for (u16 u = 0; u < size; u++) { video::SColor pixel(*source); applyShadeFactor(pixel, brightness); s16 x = xu * u + xv * v + x1; s16 y = yu * u + yv * v + y1; for (const auto &off : offsets) target[(y + off.Y) * cube_size + (x + off.X) + offset] = pixel.color; source++; } } free_image(image); }; draw_image(top, 1.000000f, 4, -4, 4 * (size - 1), 2, 2, 0, { {2, 0}, {3, 0}, {4, 0}, {5, 0}, {0, 1}, {1, 1}, {2, 1}, {3, 1}, {4, 1}, {5, 1}, {6, 1}, {7, 1}, {2, 2}, {3, 2}, {4, 2}, {5, 2}, }); draw_image(left, 0.836660f, 4, 0, 0, 2, 5, 2 * size, { {0, 0}, {1, 0}, {0, 1}, {1, 1}, {2, 1}, {3, 1}, {0, 2}, {1, 2}, {2, 2}, {3, 2}, {0, 3}, {1, 3}, {2, 3}, {3, 3}, {0, 4}, {1, 4}, {2, 4}, {3, 4}, {2, 5}, {3, 5}, }); draw_image(right, 0.670820f, 4, 0, 4 * size, -2, 5, 4 * size - 2, { {2, 0}, {3, 0}, {0, 1}, {1, 1}, {2, 1}, {3, 1}, {0, 2}, {1, 2}, {2, 2}, {3, 2}, {0, 3}, {1, 3}, {2, 3}, {3, 3}, {0, 4}, {1, 4}, {2, 4}, {3, 4}, {0, 5}, {1, 5}, }); result->unlock(); return result; } video::IImage* TextureSource::generateImage(const std::string &name) { // Get the base image const char separator = '^'; const char escape = '\\'; const char paren_open = '('; const char paren_close = ')'; // Find last separator in the name s32 last_separator_pos = -1; u8 paren_bal = 0; for (s32 i = name.size() - 1; i >= 0; i--) { if (i > 0 && name[i-1] == escape) continue; switch (name[i]) { case separator: if (paren_bal == 0) { last_separator_pos = i; i = -1; // break out of loop } break; case paren_open: if (paren_bal == 0) { errorstream << "generateImage(): unbalanced parentheses" << "(extranous '(') while generating texture \"" << name << "\"" << std::endl; return NULL; } paren_bal--; break; case paren_close: paren_bal++; break; default: break; } } if (paren_bal > 0) { errorstream << "generateImage(): unbalanced parentheses" << "(missing matching '(') while generating texture \"" << name << "\"" << std::endl; return NULL; } video::IImage *baseimg = NULL; /* If separator was found, make the base image using a recursive call. 
*/ if (last_separator_pos != -1) { baseimg = generateImage(name.substr(0, last_separator_pos)); } /* Parse out the last part of the name of the image and act according to it */ std::string last_part_of_name = name.substr(last_separator_pos + 1); /* If this name is enclosed in parentheses, generate it and blit it onto the base image */ if (last_part_of_name[0] == paren_open && last_part_of_name[last_part_of_name.size() - 1] == paren_close) { std::string name2 = last_part_of_name.substr(1, last_part_of_name.size() - 2); video::IImage *tmp = generateImage(name2); if (!tmp) { errorstream << "generateImage(): " "Failed to generate \"" << name2 << "\"" << std::endl; return NULL; } core::dimension2d<u32> dim = tmp->getDimension(); if (baseimg) { blit_with_alpha(tmp, baseimg, v2s32(0, 0), v2s32(0, 0), dim); tmp->drop(); } else { baseimg = tmp; } } else if (!generateImagePart(last_part_of_name, baseimg)) { // Generate image according to part of name errorstream << "generateImage(): " "Failed to generate \"" << last_part_of_name << "\"" << std::endl; } // If no resulting image, print a warning if (baseimg == NULL) { errorstream << "generateImage(): baseimg is NULL (attempted to" " create texture \"" << name << "\")" << std::endl; } return baseimg; } #if ENABLE_GLES static inline u16 get_GL_major_version() { const GLubyte *gl_version = glGetString(GL_VERSION); return (u16) (gl_version[0] - '0'); } /** * Check if hardware requires npot2 aligned textures * @return true if alignment NOT(!) requires, false otherwise */ bool hasNPotSupport() { // Only GLES2 is trusted to correctly report npot support // Note: we cache the boolean result, the GL context will never change. static const bool supported = get_GL_major_version() > 1 && glGetString(GL_EXTENSIONS) && strstr((char *)glGetString(GL_EXTENSIONS), "GL_OES_texture_npot"); return supported; } /** * Check and align image to npot2 if required by hardware * @param image image to check for npot2 alignment * @param driver driver to use for image operations * @return image or copy of image aligned to npot2 */ video::IImage * Align2Npot2(video::IImage * image, video::IVideoDriver* driver) { if (image == NULL) return image; if (hasNPotSupport()) return image; core::dimension2d<u32> dim = image->getDimension(); unsigned int height = npot2(dim.Height); unsigned int width = npot2(dim.Width); if (dim.Height == height && dim.Width == width) return image; if (dim.Height > height) height *= 2; if (dim.Width > width) width *= 2; video::IImage *targetimage = driver->createImage(video::ECF_A8R8G8B8, core::dimension2d<u32>(width, height)); if (targetimage != NULL) image->copyToScaling(targetimage); image->drop(); return targetimage; } #endif static std::string unescape_string(const std::string &str, const char esc = '\\') { std::string out; size_t pos = 0, cpos; out.reserve(str.size()); while (1) { cpos = str.find_first_of(esc, pos); if (cpos == std::string::npos) { out += str.substr(pos); break; } out += str.substr(pos, cpos - pos) + str[cpos + 1]; pos = cpos + 2; } return out; } bool TextureSource::generateImagePart(std::string part_of_name, video::IImage *& baseimg) { const char escape = '\\'; // same as in generateImage() video::IVideoDriver *driver = RenderingEngine::get_video_driver(); sanity_check(driver); // Stuff starting with [ are special commands if (part_of_name.empty() || part_of_name[0] != '[') { video::IImage *image = m_sourcecache.getOrLoad(part_of_name); #if ENABLE_GLES image = Align2Npot2(image, driver); #endif if (image == NULL) { if 
(!part_of_name.empty()) { // Do not create normalmap dummies if (part_of_name.find("_normal.png") != std::string::npos) { warningstream << "generateImage(): Could not load normal map \"" << part_of_name << "\"" << std::endl; return true; } errorstream << "generateImage(): Could not load image \"" << part_of_name << "\" while building texture; " "Creating a dummy image" << std::endl; } // Just create a dummy image //core::dimension2d<u32> dim(2,2); core::dimension2d<u32> dim(1,1); image = driver->createImage(video::ECF_A8R8G8B8, dim); sanity_check(image != NULL); /*image->setPixel(0,0, video::SColor(255,255,0,0)); image->setPixel(1,0, video::SColor(255,0,255,0)); image->setPixel(0,1, video::SColor(255,0,0,255)); image->setPixel(1,1, video::SColor(255,255,0,255));*/ image->setPixel(0,0, video::SColor(255,myrand()%256, myrand()%256,myrand()%256)); /*image->setPixel(1,0, video::SColor(255,myrand()%256, myrand()%256,myrand()%256)); image->setPixel(0,1, video::SColor(255,myrand()%256, myrand()%256,myrand()%256)); image->setPixel(1,1, video::SColor(255,myrand()%256, myrand()%256,myrand()%256));*/ } // If base image is NULL, load as base. if (baseimg == NULL) { //infostream<<"Setting "<<part_of_name<<" as base"<<std::endl; /* Copy it this way to get an alpha channel. Otherwise images with alpha cannot be blitted on images that don't have alpha in the original file. */ core::dimension2d<u32> dim = image->getDimension(); baseimg = driver->createImage(video::ECF_A8R8G8B8, dim); image->copyTo(baseimg); } // Else blit on base. else { //infostream<<"Blitting "<<part_of_name<<" on base"<<std::endl; // Size of the copied area core::dimension2d<u32> dim = image->getDimension(); //core::dimension2d<u32> dim(16,16); // Position to copy the blitted to in the base image core::position2d<s32> pos_to(0,0); // Position to copy the blitted from in the blitted image core::position2d<s32> pos_from(0,0); // Blit /*image->copyToWithAlpha(baseimg, pos_to, core::rect<s32>(pos_from, dim), video::SColor(255,255,255,255), NULL);*/ core::dimension2d<u32> dim_dst = baseimg->getDimension(); if (dim == dim_dst) { blit_with_alpha(image, baseimg, pos_from, pos_to, dim); } else if (dim.Width * dim.Height < dim_dst.Width * dim_dst.Height) { // Upscale overlying image video::IImage *scaled_image = RenderingEngine::get_video_driver()-> createImage(video::ECF_A8R8G8B8, dim_dst); image->copyToScaling(scaled_image); blit_with_alpha(scaled_image, baseimg, pos_from, pos_to, dim_dst); scaled_image->drop(); } else { // Upscale base image video::IImage *scaled_base = RenderingEngine::get_video_driver()-> createImage(video::ECF_A8R8G8B8, dim); baseimg->copyToScaling(scaled_base); baseimg->drop(); baseimg = scaled_base; blit_with_alpha(image, baseimg, pos_from, pos_to, dim); } } //cleanup image->drop(); } else { // A special texture modification /*infostream<<"generateImage(): generating special " <<"modification \""<<part_of_name<<"\"" <<std::endl;*/ /* [crack:N:P [cracko:N:P Adds a cracking texture N = animation frame count, P = crack progression */ if (str_starts_with(part_of_name, "[crack")) { if (baseimg == NULL) { errorstream<<"generateImagePart(): baseimg == NULL " <<"for part_of_name=\""<<part_of_name <<"\", cancelling."<<std::endl; return false; } // Crack image number and overlay option // Format: crack[o][:<tiles>]:<frame_count>:<frame> bool use_overlay = (part_of_name[6] == 'o'); Strfnd sf(part_of_name); sf.next(":"); s32 frame_count = stoi(sf.next(":")); s32 progression = stoi(sf.next(":")); s32 tiles = 1; // Check whether there 
is the <tiles> argument, that is, // whether there are 3 arguments. If so, shift values // as the first and not the last argument is optional. auto s = sf.next(":"); if (!s.empty()) { tiles = frame_count; frame_count = progression; progression = stoi(s); } if (progression >= 0) { /* Load crack image. It is an image with a number of cracking stages horizontally tiled. */ video::IImage *img_crack = m_sourcecache.getOrLoad( "crack_anylength.png"); if (img_crack) { draw_crack(img_crack, baseimg, use_overlay, frame_count, progression, driver, tiles); img_crack->drop(); } } } /* [combine:WxH:X,Y=filename:X,Y=filename2 Creates a bigger texture from any amount of smaller ones */ else if (str_starts_with(part_of_name, "[combine")) { Strfnd sf(part_of_name); sf.next(":"); u32 w0 = stoi(sf.next("x")); u32 h0 = stoi(sf.next(":")); core::dimension2d<u32> dim(w0,h0); if (baseimg == NULL) { baseimg = driver->createImage(video::ECF_A8R8G8B8, dim); baseimg->fill(video::SColor(0,0,0,0)); } while (!sf.at_end()) { u32 x = stoi(sf.next(",")); u32 y = stoi(sf.next("=")); std::string filename = unescape_string(sf.next_esc(":", escape), escape); infostream<<"Adding \""<<filename <<"\" to combined ("<<x<<","<<y<<")" <<std::endl; video::IImage *img = generateImage(filename); if (img) { core::dimension2d<u32> dim = img->getDimension(); infostream<<"Size "<<dim.Width <<"x"<<dim.Height<<std::endl; core::position2d<s32> pos_base(x, y); video::IImage *img2 = driver->createImage(video::ECF_A8R8G8B8, dim); img->copyTo(img2); img->drop(); /*img2->copyToWithAlpha(baseimg, pos_base, core::rect<s32>(v2s32(0,0), dim), video::SColor(255,255,255,255), NULL);*/ blit_with_alpha(img2, baseimg, v2s32(0,0), pos_base, dim); img2->drop(); } else { errorstream << "generateImagePart(): Failed to load image \"" << filename << "\" for [combine" << std::endl; } } } /* [brighten */ else if (str_starts_with(part_of_name, "[brighten")) { if (baseimg == NULL) { errorstream<<"generateImagePart(): baseimg==NULL " <<"for part_of_name=\""<<part_of_name <<"\", cancelling."<<std::endl; return false; } brighten(baseimg); } /* [noalpha Make image completely opaque. Used for the leaves texture when in old leaves mode, so that the transparent parts don't look completely black when simple alpha channel is used for rendering. */ else if (str_starts_with(part_of_name, "[noalpha")) { if (baseimg == NULL){ errorstream<<"generateImagePart(): baseimg==NULL " <<"for part_of_name=\""<<part_of_name <<"\", cancelling."<<std::endl; return false; } core::dimension2d<u32> dim = baseimg->getDimension(); // Set alpha to full for (u32 y=0; y<dim.Height; y++) for (u32 x=0; x<dim.Width; x++) { video::SColor c = baseimg->getPixel(x,y); c.setAlpha(255); baseimg->setPixel(x,y,c); } } /* [makealpha:R,G,B Convert one color to transparent. 
*/ else if (str_starts_with(part_of_name, "[makealpha:")) { if (baseimg == NULL) { errorstream<<"generateImagePart(): baseimg == NULL " <<"for part_of_name=\""<<part_of_name <<"\", cancelling."<<std::endl; return false; } Strfnd sf(part_of_name.substr(11)); u32 r1 = stoi(sf.next(",")); u32 g1 = stoi(sf.next(",")); u32 b1 = stoi(sf.next("")); core::dimension2d<u32> dim = baseimg->getDimension(); /*video::IImage *oldbaseimg = baseimg; baseimg = driver->createImage(video::ECF_A8R8G8B8, dim); oldbaseimg->copyTo(baseimg); oldbaseimg->drop();*/ // Set alpha to full for (u32 y=0; y<dim.Height; y++) for (u32 x=0; x<dim.Width; x++) { video::SColor c = baseimg->getPixel(x,y); u32 r = c.getRed(); u32 g = c.getGreen(); u32 b = c.getBlue(); if (!(r == r1 && g == g1 && b == b1)) continue; c.setAlpha(0); baseimg->setPixel(x,y,c); } } /* [transformN Rotates and/or flips the image. N can be a number (between 0 and 7) or a transform name. Rotations are counter-clockwise. 0 I identity 1 R90 rotate by 90 degrees 2 R180 rotate by 180 degrees 3 R270 rotate by 270 degrees 4 FX flip X 5 FXR90 flip X then rotate by 90 degrees 6 FY flip Y 7 FYR90 flip Y then rotate by 90 degrees Note: Transform names can be concatenated to produce their product (applies the first then the second). The resulting transform will be equivalent to one of the eight existing ones, though (see: dihedral group). */ else if (str_starts_with(part_of_name, "[transform")) { if (baseimg == NULL) { errorstream<<"generateImagePart(): baseimg == NULL " <<"for part_of_name=\""<<part_of_name <<"\", cancelling."<<std::endl; return false; } u32 transform = parseImageTransform(part_of_name.substr(10)); core::dimension2d<u32> dim = imageTransformDimension( transform, baseimg->getDimension()); video::IImage *image = driver->createImage( baseimg->getColorFormat(), dim); sanity_check(image != NULL); imageTransform(transform, baseimg, image); baseimg->drop(); baseimg = image; } /* [inventorycube{topimage{leftimage{rightimage In every subimage, replace ^ with &. Create an "inventory cube". NOTE: This should be used only on its own. 
Example (a grass block (not actually used in game): "[inventorycube{grass.png{mud.png&grass_side.png{mud.png&grass_side.png" */ else if (str_starts_with(part_of_name, "[inventorycube")) { if (baseimg != NULL){ errorstream<<"generateImagePart(): baseimg != NULL " <<"for part_of_name=\""<<part_of_name <<"\", cancelling."<<std::endl; return false; } str_replace(part_of_name, '&', '^'); Strfnd sf(part_of_name); sf.next("{"); std::string imagename_top = sf.next("{"); std::string imagename_left = sf.next("{"); std::string imagename_right = sf.next("{"); // Generate images for the faces of the cube video::IImage *img_top = generateImage(imagename_top); video::IImage *img_left = generateImage(imagename_left); video::IImage *img_right = generateImage(imagename_right); if (img_top == NULL || img_left == NULL || img_right == NULL) { errorstream << "generateImagePart(): Failed to create textures" << " for inventorycube \"" << part_of_name << "\"" << std::endl; baseimg = generateImage(imagename_top); return true; } baseimg = createInventoryCubeImage(img_top, img_left, img_right); // Face images are not needed anymore img_top->drop(); img_left->drop(); img_right->drop(); return true; } /* [lowpart:percent:filename Adds the lower part of a texture */ else if (str_starts_with(part_of_name, "[lowpart:")) { Strfnd sf(part_of_name); sf.next(":"); u32 percent = stoi(sf.next(":")); std::string filename = unescape_string(sf.next_esc(":", escape), escape); if (baseimg == NULL) baseimg = driver->createImage(video::ECF_A8R8G8B8, v2u32(16,16)); video::IImage *img = generateImage(filename); if (img) { core::dimension2d<u32> dim = img->getDimension(); core::position2d<s32> pos_base(0, 0); video::IImage *img2 = driver->createImage(video::ECF_A8R8G8B8, dim); img->copyTo(img2); img->drop(); core::position2d<s32> clippos(0, 0); clippos.Y = dim.Height * (100-percent) / 100; core::dimension2d<u32> clipdim = dim; clipdim.Height = clipdim.Height * percent / 100 + 1; core::rect<s32> cliprect(clippos, clipdim); img2->copyToWithAlpha(baseimg, pos_base, core::rect<s32>(v2s32(0,0), dim), video::SColor(255,255,255,255), &cliprect); img2->drop(); } } /* [verticalframe:N:I Crops a frame of a vertical animation. N = frame count, I = frame index */ else if (str_starts_with(part_of_name, "[verticalframe:")) { Strfnd sf(part_of_name); sf.next(":"); u32 frame_count = stoi(sf.next(":")); u32 frame_index = stoi(sf.next(":")); if (baseimg == NULL){ errorstream<<"generateImagePart(): baseimg != NULL " <<"for part_of_name=\""<<part_of_name <<"\", cancelling."<<std::endl; return false; } v2u32 frame_size = baseimg->getDimension(); frame_size.Y /= frame_count; video::IImage *img = driver->createImage(video::ECF_A8R8G8B8, frame_size); if (!img){ errorstream<<"generateImagePart(): Could not create image " <<"for part_of_name=\""<<part_of_name <<"\", cancelling."<<std::endl; return false; } // Fill target image with transparency img->fill(video::SColor(0,0,0,0)); core::dimension2d<u32> dim = frame_size; core::position2d<s32> pos_dst(0, 0); core::position2d<s32> pos_src(0, frame_index * frame_size.Y); baseimg->copyToWithAlpha(img, pos_dst, core::rect<s32>(pos_src, dim), video::SColor(255,255,255,255), NULL); // Replace baseimg baseimg->drop(); baseimg = img; } /* [mask:filename Applies a mask to an image */ else if (str_starts_with(part_of_name, "[mask:")) { if (baseimg == NULL) { errorstream << "generateImage(): baseimg == NULL " << "for part_of_name=\"" << part_of_name << "\", cancelling." 
<< std::endl; return false; } Strfnd sf(part_of_name); sf.next(":"); std::string filename = unescape_string(sf.next_esc(":", escape), escape); video::IImage *img = generateImage(filename); if (img) { apply_mask(img, baseimg, v2s32(0, 0), v2s32(0, 0), img->getDimension()); img->drop(); } else { errorstream << "generateImage(): Failed to load \"" << filename << "\"."; } } /* [multiply:color multiplys a given color to any pixel of an image color = color as ColorString */ else if (str_starts_with(part_of_name, "[multiply:")) { Strfnd sf(part_of_name); sf.next(":"); std::string color_str = sf.next(":"); if (baseimg == NULL) { errorstream << "generateImagePart(): baseimg != NULL " << "for part_of_name=\"" << part_of_name << "\", cancelling." << std::endl; return false; } video::SColor color; if (!parseColorString(color_str, color, false)) return false; apply_multiplication(baseimg, v2u32(0, 0), baseimg->getDimension(), color); } /* [colorize:color Overlays image with given color color = color as ColorString */ else if (str_starts_with(part_of_name, "[colorize:")) { Strfnd sf(part_of_name); sf.next(":"); std::string color_str = sf.next(":"); std::string ratio_str = sf.next(":"); if (baseimg == NULL) { errorstream << "generateImagePart(): baseimg != NULL " << "for part_of_name=\"" << part_of_name << "\", cancelling." << std::endl; return false; } video::SColor color; int ratio = -1; bool keep_alpha = false; if (!parseColorString(color_str, color, false)) return false; if (is_number(ratio_str)) ratio = mystoi(ratio_str, 0, 255); else if (ratio_str == "alpha") keep_alpha = true; apply_colorize(baseimg, v2u32(0, 0), baseimg->getDimension(), color, ratio, keep_alpha); } /* [applyfiltersformesh Internal modifier */ else if (str_starts_with(part_of_name, "[applyfiltersformesh")) { // Apply the "clean transparent" filter, if configured. if (g_settings->getBool("texture_clean_transparent")) imageCleanTransparent(baseimg, 127); /* Upscale textures to user's requested minimum size. This is a trick to make * filters look as good on low-res textures as on high-res ones, by making * low-res textures BECOME high-res ones. This is helpful for worlds that * mix high- and low-res textures, or for mods with least-common-denominator * textures that don't have the resources to offer high-res alternatives. */ const bool filter = m_setting_trilinear_filter || m_setting_bilinear_filter; const s32 scaleto = filter ? g_settings->getS32("texture_min_size") : 1; if (scaleto > 1) { const core::dimension2d<u32> dim = baseimg->getDimension(); /* Calculate scaling needed to make the shortest texture dimension * equal to the target minimum. If e.g. this is a vertical frames * animation, the short dimension will be the real size. */ if ((dim.Width == 0) || (dim.Height == 0)) { errorstream << "generateImagePart(): Illegal 0 dimension " << "for part_of_name=\""<< part_of_name << "\", cancelling." << std::endl; return false; } u32 xscale = scaleto / dim.Width; u32 yscale = scaleto / dim.Height; u32 scale = (xscale > yscale) ? xscale : yscale; // Never downscale; only scale up by 2x or more. 
		/*
			[applyfiltersformesh
			Internal modifier
		*/
		else if (str_starts_with(part_of_name, "[applyfiltersformesh")) {
			// Apply the "clean transparent" filter, if configured.
			if (g_settings->getBool("texture_clean_transparent"))
				imageCleanTransparent(baseimg, 127);

			/* Upscale textures to user's requested minimum size.  This is a trick to make
			 * filters look as good on low-res textures as on high-res ones, by making
			 * low-res textures BECOME high-res ones.  This is helpful for worlds that
			 * mix high- and low-res textures, or for mods with least-common-denominator
			 * textures that don't have the resources to offer high-res alternatives.
			 */
			const bool filter = m_setting_trilinear_filter || m_setting_bilinear_filter;
			const s32 scaleto = filter ? g_settings->getS32("texture_min_size") : 1;
			if (scaleto > 1) {
				const core::dimension2d<u32> dim = baseimg->getDimension();

				/* Calculate scaling needed to make the shortest texture
				 * dimension equal to the target minimum.  If e.g. this is a
				 * vertical frames animation, the short dimension will be the
				 * real size.
				 */
				if ((dim.Width == 0) || (dim.Height == 0)) {
					errorstream << "generateImagePart(): Illegal 0 dimension "
							<< "for part_of_name=\""<< part_of_name
							<< "\", cancelling." << std::endl;
					return false;
				}
				u32 xscale = scaleto / dim.Width;
				u32 yscale = scaleto / dim.Height;
				u32 scale = (xscale > yscale) ? xscale : yscale;

				// Never downscale; only scale up by 2x or more.
				if (scale > 1) {
					u32 w = scale * dim.Width;
					u32 h = scale * dim.Height;
					const core::dimension2d<u32> newdim = core::dimension2d<u32>(w, h);
					video::IImage *newimg = driver->createImage(
							baseimg->getColorFormat(), newdim);
					baseimg->copyToScaling(newimg);
					baseimg->drop();
					baseimg = newimg;
				}
			}
		}
		/*
			[resize:WxH
			Resizes the base image to the given dimensions
		*/
		else if (str_starts_with(part_of_name, "[resize")) {
			if (baseimg == NULL) {
				errorstream << "generateImagePart(): baseimg == NULL "
						<< "for part_of_name=\""<< part_of_name
						<< "\", cancelling." << std::endl;
				return false;
			}

			Strfnd sf(part_of_name);
			sf.next(":");
			u32 width = stoi(sf.next("x"));
			u32 height = stoi(sf.next(""));
			core::dimension2d<u32> dim(width, height);

			video::IImage *image = RenderingEngine::get_video_driver()->
					createImage(video::ECF_A8R8G8B8, dim);
			baseimg->copyToScaling(image);
			baseimg->drop();
			baseimg = image;
		}
		/*
			[opacity:R
			Makes the base image transparent according to the given ratio.
			R must be between 0 and 255.
			0 means totally transparent. 255 means totally opaque.
		*/
		else if (str_starts_with(part_of_name, "[opacity:")) {
			if (baseimg == NULL) {
				errorstream << "generateImagePart(): baseimg == NULL "
						<< "for part_of_name=\"" << part_of_name
						<< "\", cancelling." << std::endl;
				return false;
			}

			Strfnd sf(part_of_name);
			sf.next(":");

			u32 ratio = mystoi(sf.next(""), 0, 255);

			core::dimension2d<u32> dim = baseimg->getDimension();

			for (u32 y = 0; y < dim.Height; y++)
			for (u32 x = 0; x < dim.Width; x++)
			{
				video::SColor c = baseimg->getPixel(x, y);
				// Use floating point division so that the "+ 0.5" actually
				// rounds; with pure integer division it would be lost.
				c.setAlpha(floor((c.getAlpha() * ratio) / 255.0f + 0.5f));
				baseimg->setPixel(x, y, c);
			}
		}
		/*
			[invert:mode
			Inverts the given channels of the base image.
			Mode may contain the characters "r", "g", "b", "a".
			Only the channels that are mentioned in the mode string
			will be inverted.
		*/
		else if (str_starts_with(part_of_name, "[invert:")) {
			if (baseimg == NULL) {
				errorstream << "generateImagePart(): baseimg == NULL "
						<< "for part_of_name=\"" << part_of_name
						<< "\", cancelling." << std::endl;
				return false;
			}

			Strfnd sf(part_of_name);
			sf.next(":");

			std::string mode = sf.next("");
			u32 mask = 0;
			if (mode.find('a') != std::string::npos)
				mask |= 0xff000000UL;
			if (mode.find('r') != std::string::npos)
				mask |= 0x00ff0000UL;
			if (mode.find('g') != std::string::npos)
				mask |= 0x0000ff00UL;
			if (mode.find('b') != std::string::npos)
				mask |= 0x000000ffUL;

			core::dimension2d<u32> dim = baseimg->getDimension();

			for (u32 y = 0; y < dim.Height; y++)
			for (u32 x = 0; x < dim.Width; x++)
			{
				video::SColor c = baseimg->getPixel(x, y);
				c.color ^= mask;
				baseimg->setPixel(x, y, c);
			}
		}
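		/*
			Illustrative examples for [opacity and [invert; the texture name
			is a hypothetical placeholder:
			  "base.png^[opacity:127" roughly halves the alpha of every
			  pixel, and "base.png^[invert:rgb" inverts the three color
			  channels while leaving the alpha channel untouched
			  (mask 0x00ffffff).
		*/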
		/*
			[sheet:WxH:X,Y
			Retrieves a tile at position X,Y (in tiles) from the base image,
			which is assumed to be a tilesheet with dimensions W,H (in tiles).
		*/
		else if (str_starts_with(part_of_name, "[sheet:")) {
			if (baseimg == NULL) {
				errorstream << "generateImagePart(): baseimg == NULL "
						<< "for part_of_name=\"" << part_of_name
						<< "\", cancelling." << std::endl;
				return false;
			}

			Strfnd sf(part_of_name);
			sf.next(":");
			u32 w0 = stoi(sf.next("x"));
			u32 h0 = stoi(sf.next(":"));
			u32 x0 = stoi(sf.next(","));
			u32 y0 = stoi(sf.next(":"));

			core::dimension2d<u32> img_dim = baseimg->getDimension();
			core::dimension2d<u32> tile_dim(v2u32(img_dim) / v2u32(w0, h0));

			video::IImage *img = driver->createImage(
					video::ECF_A8R8G8B8, tile_dim);
			if (!img) {
				errorstream << "generateImagePart(): Could not create image "
						<< "for part_of_name=\"" << part_of_name
						<< "\", cancelling." << std::endl;
				return false;
			}

			img->fill(video::SColor(0,0,0,0));
			v2u32 vdim(tile_dim);
			core::rect<s32> rect(v2s32(x0 * vdim.X, y0 * vdim.Y), tile_dim);
			baseimg->copyToWithAlpha(img, v2s32(0), rect,
					video::SColor(255,255,255,255), NULL);

			// Replace baseimg
			baseimg->drop();
			baseimg = img;
		}
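		/*
			Illustrative example for [sheet; the texture name is a
			hypothetical placeholder:
			  "tilesheet.png^[sheet:2x2:0,1" applied to a 32x32 image yields
			  the 16x16 tile whose top-left corner is at pixel (0, 16).
		*/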
		else {
			errorstream << "generateImagePart(): Invalid "
					"modification: \"" << part_of_name << "\"" << std::endl;
		}
	}

	return true;
}

/*
	Calculate the color of a single pixel drawn on top of another pixel.

	This is a little more complicated than just video::SColor::getInterpolated
	because getInterpolated does not handle alpha correctly.  For example, a
	pixel with alpha=64 drawn atop a pixel with alpha=128 should yield a
	pixel with alpha=160, while getInterpolated would yield alpha=96.
*/
static inline video::SColor blitPixel(const video::SColor &src_c,
		const video::SColor &dst_c, u32 ratio)
{
	if (dst_c.getAlpha() == 0)
		return src_c;
	video::SColor out_c = src_c.getInterpolated(dst_c, (float)ratio / 255.0f);
	out_c.setAlpha(dst_c.getAlpha() + (255 - dst_c.getAlpha()) *
		src_c.getAlpha() * ratio / (255 * 255));
	return out_c;
}

/*
	Draw an image on top of another one, using the alpha channel of the
	source image

	This exists because IImage::copyToWithAlpha() doesn't seem to always
	work.
*/
static void blit_with_alpha(video::IImage *src, video::IImage *dst,
		v2s32 src_pos, v2s32 dst_pos, v2u32 size)
{
	for (u32 y0=0; y0<size.Y; y0++)
	for (u32 x0=0; x0<size.X; x0++)
	{
		s32 src_x = src_pos.X + x0;
		s32 src_y = src_pos.Y + y0;
		s32 dst_x = dst_pos.X + x0;
		s32 dst_y = dst_pos.Y + y0;
		video::SColor src_c = src->getPixel(src_x, src_y);
		video::SColor dst_c = dst->getPixel(dst_x, dst_y);
		dst_c = blitPixel(src_c, dst_c, src_c.getAlpha());
		dst->setPixel(dst_x, dst_y, dst_c);
	}
}

/*
	Draw an image on top of another one, using the alpha channel of the
	source image; only modify fully opaque pixels in the destination
*/
static void blit_with_alpha_overlay(video::IImage *src, video::IImage *dst,
		v2s32 src_pos, v2s32 dst_pos, v2u32 size)
{
	for (u32 y0=0; y0<size.Y; y0++)
	for (u32 x0=0; x0<size.X; x0++)
	{
		s32 src_x = src_pos.X + x0;
		s32 src_y = src_pos.Y + y0;
		s32 dst_x = dst_pos.X + x0;
		s32 dst_y = dst_pos.Y + y0;
		video::SColor src_c = src->getPixel(src_x, src_y);
		video::SColor dst_c = dst->getPixel(dst_x, dst_y);
		if (dst_c.getAlpha() == 255 && src_c.getAlpha() != 0)
		{
			dst_c = blitPixel(src_c, dst_c, src_c.getAlpha());
			dst->setPixel(dst_x, dst_y, dst_c);
		}
	}
}

// This function has been disabled because it is currently unused.
// Feel free to re-enable if you find it handy.
#if 0
/*
	Draw an image on top of another one, using the specified ratio;
	modify all partially-opaque pixels in the destination.
*/
static void blit_with_interpolate_overlay(video::IImage *src, video::IImage *dst,
		v2s32 src_pos, v2s32 dst_pos, v2u32 size, int ratio)
{
	for (u32 y0 = 0; y0 < size.Y; y0++)
	for (u32 x0 = 0; x0 < size.X; x0++)
	{
		s32 src_x = src_pos.X + x0;
		s32 src_y = src_pos.Y + y0;
		s32 dst_x = dst_pos.X + x0;
		s32 dst_y = dst_pos.Y + y0;
		video::SColor src_c = src->getPixel(src_x, src_y);
		video::SColor dst_c = dst->getPixel(dst_x, dst_y);
		if (dst_c.getAlpha() > 0 && src_c.getAlpha() != 0)
		{
			if (ratio == -1)
				dst_c = src_c.getInterpolated(dst_c, (float)src_c.getAlpha()/255.0f);
			else
				dst_c = src_c.getInterpolated(dst_c, (float)ratio/255.0f);
			dst->setPixel(dst_x, dst_y, dst_c);
		}
	}
}
#endif

/*
	Apply color to destination
*/
static void apply_colorize(video::IImage *dst, v2u32 dst_pos, v2u32 size,
		const video::SColor &color, int ratio, bool keep_alpha)
{
	u32 alpha = color.getAlpha();
	video::SColor dst_c;
	if ((ratio == -1 && alpha == 255) || ratio == 255) { // full replacement of color
		if (keep_alpha) { // replace the color with alpha = dest alpha * color alpha
			dst_c = color;
			for (u32 y = dst_pos.Y; y < dst_pos.Y + size.Y; y++)
			for (u32 x = dst_pos.X; x < dst_pos.X + size.X; x++) {
				u32 dst_alpha = dst->getPixel(x, y).getAlpha();
				if (dst_alpha > 0) {
					dst_c.setAlpha(dst_alpha * alpha / 255);
					dst->setPixel(x, y, dst_c);
				}
			}
		} else { // replace the color including the alpha
			for (u32 y = dst_pos.Y; y < dst_pos.Y + size.Y; y++)
			for (u32 x = dst_pos.X; x < dst_pos.X + size.X; x++)
				if (dst->getPixel(x, y).getAlpha() > 0)
					dst->setPixel(x, y, color);
		}
	} else { // interpolate between the color and destination
		float interp = (ratio == -1 ? color.getAlpha() / 255.0f : ratio / 255.0f);
		for (u32 y = dst_pos.Y; y < dst_pos.Y + size.Y; y++)
		for (u32 x = dst_pos.X; x < dst_pos.X + size.X; x++) {
			dst_c = dst->getPixel(x, y);
			if (dst_c.getAlpha() > 0) {
				dst_c = color.getInterpolated(dst_c, interp);
				dst->setPixel(x, y, dst_c);
			}
		}
	}
}

/*
	Multiply a color with the destination
*/
static void apply_multiplication(video::IImage *dst, v2u32 dst_pos, v2u32 size,
		const video::SColor &color)
{
	video::SColor dst_c;

	for (u32 y = dst_pos.Y; y < dst_pos.Y + size.Y; y++)
	for (u32 x = dst_pos.X; x < dst_pos.X + size.X; x++) {
		dst_c = dst->getPixel(x, y);
		dst_c.set(
				dst_c.getAlpha(),
				(dst_c.getRed() * color.getRed()) / 255,
				(dst_c.getGreen() * color.getGreen()) / 255,
				(dst_c.getBlue() * color.getBlue()) / 255
				);
		dst->setPixel(x, y, dst_c);
	}
}

/*
	Apply mask to destination
*/
static void apply_mask(video::IImage *mask, video::IImage *dst,
		v2s32 mask_pos, v2s32 dst_pos, v2u32 size)
{
	for (u32 y0 = 0; y0 < size.Y; y0++) {