path: root/src/emerge.h
blob: 6f204666df70776a69aea6aef2e4a41630ebe8ed (plain)
/*
Minetest
Copyright (C) 2010-2013 kwolekr, Ryan Kwolek <kwolekr@minetest.net>

This program is free software; you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation; either version 2.1 of the License, or
(at your option) any later version.

This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU Lesser General Public License for more details.

You should have received a copy of the GNU Lesser General Public License along
with this program; if not, write to the Free Software Foundation, Inc.,
51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/

#pragma once

#include <map>
#include <mutex>
#include "network/networkprotocol.h"
#include "irr_v3d.h"
#include "util/container.h"
#include "mapgen/mapgen.h" // for MapgenParams
#include "map.h"

#define BLOCK_EMERGE_ALLOW_GEN   (1 << 0)
#define BLOCK_EMERGE_FORCE_QUEUE (1 << 1)

#define EMERGE_DBG_OUT(x) {                            \
	if (enable_mapgen_debug_info)                      \
		infostream << "EmergeThread: " x << std::endl; \
}
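
// Illustrative use of the macro above (not from the original header):
//   EMERGE_DBG_OUT("block already generated");
// logs "EmergeThread: block already generated" to infostream whenever
// enable_mapgen_debug_info is set. Note the argument is pasted directly
// after the "EmergeThread: " literal, so plain string literals concatenate.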

class EmergeThread;
class NodeDefManager;
class Settings;

class BiomeManager;
class OreManager;
class DecorationManager;
class SchematicManager;
class Server;
class ModApiMapgen;

// Structure containing inputs/outputs for chunk generation
struct BlockMakeData {
	MMVManip *vmanip = nullptr;
	u64 seed = 0;
	v3s16 blockpos_min;
	v3s16 blockpos_max;
	v3s16 blockpos_requested;
	UniqueQueue<v3s16> transforming_liquid;
	const NodeDefManager *nodedef = nullptr;

	BlockMakeData() = default;

	~BlockMakeData() { delete vmanip; }
};

// Result from processing an item on the emerge queue
enum EmergeAction {
	EMERGE_CANCELLED,
	EMERGE_ERRORED,
	EMERGE_FROM_MEMORY,
	EMERGE_FROM_DISK,
	EMERGE_GENERATED,
};

// Callback invoked once an emerge request has been processed
typedef void (*EmergeCompletionCallback)(
	v3s16 blockpos, EmergeAction action, void *param);
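
/* Minimal sketch (illustrative, not part of the original header): a
 * completion callback matching the typedef above could look like
 *
 *   static void on_block_emerged(v3s16 blockpos, EmergeAction action, void *param)
 *   {
 *       // e.g. inspect action (EMERGE_GENERATED, EMERGE_FROM_DISK, ...) and
 *       // notify whoever requested the block via param
 *   }
 *
 * Such a callback is registered per request via enqueueBlockEmergeEx() below.
 */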

typedef std::vector<
	std::pair<
		EmergeCompletionCallback,
		void *
	>
> EmergeCallbackList;

struct BlockEmergeData {
	u16 peer_requested;           // ID of the peer that requested this block
	u16 flags;                    // BLOCK_EMERGE_* flags
	EmergeCallbackList callbacks; // completion callbacks to invoke for this block
};

class EmergeParams {
	friend class EmergeManager;
public:
	EmergeParams() = delete;
	~EmergeParams();
	DISABLE_CLASS_COPY(EmergeParams);

	const NodeDefManager *ndef; // shared
	bool enable_mapgen_debug_info;

	u32 gen_notify_on;
	const std::set<u32> *gen_notify_on_deco_ids; // shared

	BiomeManager *biomemgr;
	OreManager *oremgr;
	DecorationManager *decomgr;
	SchematicManager *schemmgr;

private:
	EmergeParams(EmergeManager *parent, const BiomeManager *biomemgr,
		const OreManager *oremgr, const DecorationManager *decomgr,
		const SchematicManager *schemmgr);
};

class EmergeManager {
	/* The mod API needs unchecked access to allow:
	 * - using decomgr or oremgr to place decos/ores
	 * - using schemmgr to load and place schematics
	 */
	friend class ModApiMapgen;
public:
	const NodeDefManager *ndef;
	bool enable_mapgen_debug_info;

	// Generation Notify
	u32 gen_notify_on = 0;
	std::set<u32> gen_notify_on_deco_ids;

	// Parameters passed to mapgens owned by ServerMap
	// TODO(hmmmm): Remove this after mapgen helper methods using them
	// are moved to ServerMap
	MapgenParams *mgparams;

	// Hackish workaround:
	// For now, EmergeManager must hold onto a ptr to the Map's setting manager
	// since the Map can only be accessed through the Environment, and the
	// Environment is not created until after script initialization.
	MapSettingsManager *map_settings_mgr;

	// Methods
	EmergeManager(Server *server);
	~EmergeManager();
	DISABLE_CLASS_COPY(EmergeManager);

	// no usage restrictions
	const BiomeManager *getBiomeManager() const { return biomemgr; }
	const OreManager *getOreManager() const { return oremgr; }
	const DecorationManager *getDecorationManager() const { return decomgr; }
	const SchematicManager *getSchematicManager() const { return schemmgr; }
	// only usable before mapgen init
	BiomeManager *getWritableBiomeManager();
	OreManager *getWritableOreManager();
	DecorationManager *getWritableDecorationManager();
	SchematicManager *getWritableSchematicManager();

	void initMapgens(MapgenParams *mgparams);

	void startThreads();
	void stopThreads();
	bool isRunning();

	bool enqueueBlockEmerge(
		session_t peer_id,
		v3s16 blockpos,
		bool allow_generate,
		bool ignore_queue_limits=false);

	bool enqueueBlockEmergeEx(
		v3s16 blockpos,
		session_t peer_id,
		u16 flags,
		EmergeCompletionCallback callback,
		void *callback_param);
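
	/* Usage sketch (illustrative): a caller that wants completion
	 * notification could do roughly
	 *
	 *   emerge->enqueueBlockEmergeEx(blockpos, peer_id,
	 *       BLOCK_EMERGE_ALLOW_GEN, on_block_emerged, request_ctx);
	 *
	 * where on_block_emerged and request_ctx are hypothetical; the simpler
	 * enqueueBlockEmerge() above covers the common case without a callback.
	 */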

	v3s16 getContainingChunk(v3s16 blockpos);

	Mapgen *getCurrentMapgen();

	// Mapgen helper methods
	int getSpawnLevelAtPoint(v2s16 p);
	int getGroundLevelAtPoint(v2s16 p);
	bool isBlockUnderground(v3s16 blockpos);

	static v3s16 getContainingChunk(v3s16 blockpos, s16 chunksize);

private:
	std::vector<Mapgen *> m_mapgens;
	std::vector<EmergeThread *> m_threads;
	bool m_threads_active = false;

	std::mutex m_queue_mutex;
	std::map<v3s16, BlockEmergeData> m_blocks_enqueued;
	std::unordered_map<u16, u16> m_peer_queue_count;

	// Emerge queue size limits
	u16 m_qlimit_total;
	u16 m_qlimit_diskonly;
	u16 m_qlimit_generate;

	// Managers of various map generation-related components
	// Note that each Mapgen gets a copy(!) of these to work with
	BiomeManager *biomemgr;
	OreManager *oremgr;
	DecorationManager *decomgr;
	SchematicManager *schemmgr;

	// Requires m_queue_mutex to be held by the caller
	EmergeThread *getOptimalThread();

	bool pushBlockEmergeData(
		v3s16 pos,
		u16 peer_requested,
		u16 flags,
		EmergeCompletionCallback callback,
		void *callback_param,
		bool *entry_already_exists);

	bool popBlockEmergeData(v3s16 pos, BlockEmergeData *bedata);

	friend class EmergeThread;
};
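
/* Typical lifecycle (a sketch inferred from the interface above, not code
 * taken from the implementation):
 *
 *   EmergeManager *emerge = new EmergeManager(server);
 *   emerge->initMapgens(mgparams);        // mgparams obtained from map settings
 *   emerge->startThreads();
 *   emerge->enqueueBlockEmerge(peer_id, blockpos, true);
 *   // ... later, on shutdown:
 *   emerge->stopThreads();
 *   delete emerge;
 */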
h" #include "util/string.h" static std::string getMediaCacheDir() { return porting::path_user + DIR_DELIM + "cache" + DIR_DELIM + "media"; } /* ClientMediaDownloader */ ClientMediaDownloader::ClientMediaDownloader(): m_media_cache(getMediaCacheDir()) { m_initial_step_done = false; m_name_bound = ""; // works because "" is an invalid file name m_uncached_count = 0; m_uncached_received_count = 0; m_httpfetch_caller = HTTPFETCH_DISCARD; m_httpfetch_active = 0; m_httpfetch_active_limit = 0; m_httpfetch_next_id = 0; m_httpfetch_timeout = 0; m_outstanding_hash_sets = 0; } ClientMediaDownloader::~ClientMediaDownloader() { if (m_httpfetch_caller != HTTPFETCH_DISCARD) httpfetch_caller_free(m_httpfetch_caller); for (std::map<std::string, FileStatus*>::iterator it = m_files.begin(); it != m_files.end(); ++it) delete it->second; for (u32 i = 0; i < m_remotes.size(); ++i) delete m_remotes[i]; } void ClientMediaDownloader::addFile(std::string name, std::string sha1) { assert(!m_initial_step_done); // if name was already announced, ignore the new announcement if (m_files.count(name) != 0) { errorstream << "Client: ignoring duplicate media announcement " << "sent by server: \"" << name << "\"" << std::endl; return; } // if name is empty or contains illegal characters, ignore the file if (name.empty() || !string_allowed(name, TEXTURENAME_ALLOWED_CHARS)) { errorstream << "Client: ignoring illegal file name " << "sent by server: \"" << name << "\"" << std::endl; return; } // length of sha1 must be exactly 20 (160 bits), else ignore the file if (sha1.size() != 20) { errorstream << "Client: ignoring illegal SHA1 sent by server: " << hex_encode(sha1) << " \"" << name << "\"" << std::endl; return; } FileStatus *filestatus = new FileStatus; filestatus->received = false; filestatus->sha1 = sha1; filestatus->current_remote = -1; m_files.insert(std::make_pair(name, filestatus)); } void ClientMediaDownloader::addRemoteServer(std::string baseurl) { assert(!m_initial_step_done); #ifdef USE_CURL infostream << "Client: Adding remote server \"" << baseurl << "\" for media download" << std::endl; RemoteServerStatus *remote = new RemoteServerStatus; remote->baseurl = baseurl; remote->active_count = 0; remote->request_by_filename = false; m_remotes.push_back(remote); #else infostream << "Client: Ignoring remote server \"" << baseurl << "\" because cURL support is not compiled in" << std::endl; #endif } void ClientMediaDownloader::step(Client *client) { if (!m_initial_step_done) { initialStep(client); m_initial_step_done = true; } // Remote media: check for completion of fetches if (m_httpfetch_active) { bool fetched_something = false; HTTPFetchResult fetchresult; while (httpfetch_async_get(m_httpfetch_caller, fetchresult)) { m_httpfetch_active--; fetched_something = true; // Is this a hashset (index.mth) or a media file? if (fetchresult.request_id < m_remotes.size()) remoteHashSetReceived(fetchresult); else remoteMediaReceived(fetchresult, client); } if (fetched_something) startRemoteMediaTransfers(); // Did all remote transfers end and no new ones can be started? // If so, request still missing files from the minetest server // (Or report that we have all files.) if (m_httpfetch_active == 0) { if (m_uncached_received_count < m_uncached_count) { infostream << "Client: Failed to remote-fetch " << (m_uncached_count-m_uncached_received_count) << " files. Requesting them" << " the usual way." 
<< std::endl; } startConventionalTransfers(client); } } } void ClientMediaDownloader::initialStep(Client *client) { // Check media cache m_uncached_count = m_files.size(); for (std::map<std::string, FileStatus*>::iterator it = m_files.begin(); it != m_files.end(); ++it) { std::string name = it->first; FileStatus *filestatus = it->second; const std::string &sha1 = filestatus->sha1; std::ostringstream tmp_os(std::ios_base::binary); bool found_in_cache = m_media_cache.load(hex_encode(sha1), tmp_os); // If found in cache, try to load it from there if (found_in_cache) { bool success = checkAndLoad(name, sha1, tmp_os.str(), true, client); if (success) { filestatus->received = true; m_uncached_count--; } } } assert(m_uncached_received_count == 0); // Create the media cache dir if we are likely to write to it if (m_uncached_count != 0) { bool did = fs::CreateAllDirs(getMediaCacheDir()); if (!did) { errorstream << "Client: " << "Could not create media cache directory: " << getMediaCacheDir() << std::endl; } } // If we found all files in the cache, report this fact to the server. // If the server reported no remote servers, immediately start // conventional transfers. Note: if cURL support is not compiled in, // m_remotes is always empty, so "!USE_CURL" is redundant but may // reduce the size of the compiled code if (!USE_CURL || m_uncached_count == 0 || m_remotes.empty()) { startConventionalTransfers(client); } else { // Otherwise start off by requesting each server's sha1 set // This is the first time we use httpfetch, so alloc a caller ID m_httpfetch_caller = httpfetch_caller_alloc(); m_httpfetch_timeout = g_settings->getS32("curl_timeout"); // Set the active fetch limit to curl_parallel_limit or 84, // whichever is greater. This gives us some leeway so that // inefficiencies in communicating with the httpfetch thread // don't slow down fetches too much. (We still want some limit // so that when the first remote server returns its hash set, // not all files are requested from that server immediately.) // One such inefficiency is that ClientMediaDownloader::step() // is only called a couple times per second, while httpfetch // might return responses much faster than that. // Note that httpfetch strictly enforces curl_parallel_limit // but at no inter-thread communication cost. This however // doesn't help with the aforementioned inefficiencies. // The signifance of 84 is that it is 2*6*9 in base 13. m_httpfetch_active_limit = g_settings->getS32("curl_parallel_limit"); m_httpfetch_active_limit = MYMAX(m_httpfetch_active_limit, 84); // Write a list of hashes that we need. This will be POSTed // to the server using Content-Type: application/octet-stream std::string required_hash_set = serializeRequiredHashSet(); // minor fixme: this loop ignores m_httpfetch_active_limit // another minor fixme, unlikely to matter in normal usage: // these index.mth fetches do (however) count against // m_httpfetch_active_limit when starting actual media file // requests, so if there are lots of remote servers that are // not responding, those will stall new media file transfers. 
for (u32 i = 0; i < m_remotes.size(); ++i) { assert(m_httpfetch_next_id == i); RemoteServerStatus *remote = m_remotes[i]; actionstream << "Client: Contacting remote server \"" << remote->baseurl << "\"" << std::endl; HTTPFetchRequest fetchrequest; fetchrequest.url = remote->baseurl + MTHASHSET_FILE_NAME; fetchrequest.caller = m_httpfetch_caller; fetchrequest.request_id = m_httpfetch_next_id; // == i fetchrequest.timeout = m_httpfetch_timeout; fetchrequest.connect_timeout = m_httpfetch_timeout; fetchrequest.post_fields = required_hash_set; fetchrequest.extra_headers.push_back( "Content-Type: application/octet-stream"); httpfetch_async(fetchrequest); m_httpfetch_active++; m_httpfetch_next_id++; m_outstanding_hash_sets++; } } } void ClientMediaDownloader::remoteHashSetReceived( const HTTPFetchResult &fetchresult) { u32 remote_id = fetchresult.request_id; assert(remote_id < m_remotes.size()); RemoteServerStatus *remote = m_remotes[remote_id]; m_outstanding_hash_sets--; if (fetchresult.succeeded) { try { // Server sent a list of file hashes that are // available on it, try to parse the list std::set<std::string> sha1_set; deSerializeHashSet(fetchresult.data, sha1_set); // Parsing succeeded: For every file that is // available on this server, add this server // to the available_remotes array for(std::map<std::string, FileStatus*>::iterator it = m_files.upper_bound(m_name_bound); it != m_files.end(); ++it) { FileStatus *f = it->second; if (!f->received && sha1_set.count(f->sha1)) f->available_remotes.push_back(remote_id); } } catch (SerializationError &e) { infostream << "Client: Remote server \"" << remote->baseurl << "\" sent invalid hash set: " << e.what() << std::endl; } } // For compatibility: If index.mth is not found, assume that the // server contains files named like the original files (not their sha1) if (!fetchresult.succeeded && !fetchresult.timeout && fetchresult.response_code == 404) { infostream << "Client: Enabling compatibility mode for remote " << "server \"" << remote->baseurl << "\"" << std::endl; remote->request_by_filename = true; // Assume every file is available on this server for(std::map<std::string, FileStatus*>::iterator it = m_files.upper_bound(m_name_bound); it != m_files.end(); ++it) { FileStatus *f = it->second; if (!f->received) f->available_remotes.push_back(remote_id); } } } void ClientMediaDownloader::remoteMediaReceived( const HTTPFetchResult &fetchresult, Client *client) { // Some remote server sent us a file. 
// -> decrement number of active fetches // -> mark file as received if fetch succeeded // -> try to load media std::string name; { std::map<unsigned long, std::string>::iterator it = m_remote_file_transfers.find(fetchresult.request_id); assert(it != m_remote_file_transfers.end()); name = it->second; m_remote_file_transfers.erase(it); } assert(m_files.count(name) != 0); FileStatus *filestatus = m_files[name]; assert(!filestatus->received); assert(filestatus->current_remote >= 0); RemoteServerStatus *remote = m_remotes[filestatus->current_remote]; filestatus->current_remote = -1; remote->active_count--; // If fetch succeeded, try to load media file if (fetchresult.succeeded) { bool success = checkAndLoad(name, filestatus->sha1, fetchresult.data, false, client); if (success) { filestatus->received = true; assert(m_uncached_received_count < m_uncached_count); m_uncached_received_count++; } } } s32 ClientMediaDownloader::selectRemoteServer(FileStatus *filestatus) { assert(filestatus != NULL); assert(!filestatus->received); assert(filestatus->current_remote < 0); if (filestatus->available_remotes.empty()) return -1; else { // Of all servers that claim to provide the file (and haven't // been unsuccessfully tried before), find the one with the // smallest number of currently active transfers s32 best = 0; s32 best_remote_id = filestatus->available_remotes[best]; s32 best_active_count = m_remotes[best_remote_id]->active_count; for (u32 i = 1; i < filestatus->available_remotes.size(); ++i) { s32 remote_id = filestatus->available_remotes[i]; s32 active_count = m_remotes[remote_id]->active_count; if (active_count < best_active_count) { best = i; best_remote_id = remote_id; best_active_count = active_count; } } filestatus->available_remotes.erase( filestatus->available_remotes.begin() + best); return best_remote_id; } } void ClientMediaDownloader::startRemoteMediaTransfers() { bool changing_name_bound = true; for (std::map<std::string, FileStatus*>::iterator files_iter = m_files.upper_bound(m_name_bound); files_iter != m_files.end(); ++files_iter) { // Abort if active fetch limit is exceeded if (m_httpfetch_active >= m_httpfetch_active_limit) break; const std::string &name = files_iter->first; FileStatus *filestatus = files_iter->second; if (!filestatus->received && filestatus->current_remote < 0) { // File has not been received yet and is not currently // being transferred. Choose a server for it. s32 remote_id = selectRemoteServer(filestatus); if (remote_id >= 0) { // Found a server, so start fetching RemoteServerStatus *remote = m_remotes[remote_id]; std::string url = remote->baseurl + (remote->request_by_filename ? name : hex_encode(filestatus->sha1)); verbosestream << "Client: " << "Requesting remote media file "