Merge pull request #185 from jss2a98aj/backport-4.4-fs
[backport] Assorted file system and file system dock improvements
jss2a98aj authored Dec 12, 2024
2 parents 5e2ca71 + a0cdca2 commit 4190d1f
Showing 26 changed files with 734 additions and 488 deletions.
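The common thread across the hunks below is replacing manually paired mutex.lock()/mutex.unlock() calls with a scoped RAII guard, Godot's MutexLock, which locks in its constructor and unlocks in its destructor. A minimal standalone sketch of the before/after shape, using std::lock_guard as a stand-in for MutexLock (all names here are illustrative, not from the commit):

	// Sketch of the RAII locking pattern these hunks adopt. std::lock_guard
	// stands in for Godot's MutexLock: the constructor locks, the destructor
	// unlocks on every exit path, so no path can leave the mutex held.
	#include <mutex>
	#include <stdexcept>
	#include <vector>

	static std::mutex queue_mutex;
	static std::vector<int> queue;

	// Before: every exit path must remember its own unlock.
	void push_manual(int value) {
		queue_mutex.lock();
		if (value < 0) {
			queue_mutex.unlock(); // Forgetting this line deadlocks later callers.
			throw std::invalid_argument("negative value");
		}
		queue.push_back(value);
		queue_mutex.unlock();
	}

	// After: the guard unlocks automatically, even on the throwing path.
	void push_raii(int value) {
		std::lock_guard<std::mutex> lock(queue_mutex);
		if (value < 0) {
			throw std::invalid_argument("negative value"); // Still unlocks.
		}
		queue.push_back(value);
	}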
3 changes: 1 addition & 2 deletions core/debugger/remote_debugger_peer.cpp
@@ -144,9 +144,8 @@ void RemoteDebuggerPeerTCP::_read_in() {
 			Error err = decode_variant(var, buf, in_pos, &read);
 			ERR_CONTINUE(read != in_pos || err != OK);
 			ERR_CONTINUE_MSG(var.get_type() != Variant::ARRAY, "Malformed packet received, not an Array.");
-			mutex.lock();
+			MutexLock lock(mutex);
 			in_queue.push_back(var);
-			mutex.unlock();
 		}
 	}
 }
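Note the guard's placement inside the read loop: a MutexLock declared in a loop body is acquired anew each iteration and released at the iteration's closing brace, so the mutex is held only for the push_back, not across decoding. A hypothetical sketch of that scoping, with std::lock_guard again standing in for MutexLock:

	// The guard lives only for one iteration's tail: decode work happens
	// unlocked, and the mutex is held just long enough to publish the result.
	#include <mutex>
	#include <vector>

	static std::mutex queue_mutex;
	static std::vector<int> in_queue;

	void read_in(const std::vector<int> &decoded) {
		for (int var : decoded) {
			// ... validation that may `continue` runs before locking ...
			std::lock_guard<std::mutex> lock(queue_mutex); // Acquired here.
			in_queue.push_back(var);
		} // Released here, at the end of each iteration.
	}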
99 changes: 46 additions & 53 deletions core/io/resource.cpp
@@ -60,32 +60,32 @@ void Resource::set_path(const String &p_path, bool p_take_over) {
 		p_take_over = false; // Can't take over an empty path
 	}
 
-	ResourceCache::lock.lock();
+	{
+		MutexLock lock(ResourceCache::lock);
 
-	if (!path_cache.is_empty()) {
-		ResourceCache::resources.erase(path_cache);
-	}
+		if (!path_cache.is_empty()) {
+			ResourceCache::resources.erase(path_cache);
+		}
 
-	path_cache = "";
+		path_cache = "";
 
-	Ref<Resource> existing = ResourceCache::get_ref(p_path);
+		Ref<Resource> existing = ResourceCache::get_ref(p_path);
 
-	if (existing.is_valid()) {
-		if (p_take_over) {
-			existing->path_cache = String();
-			ResourceCache::resources.erase(p_path);
-		} else {
-			ResourceCache::lock.unlock();
-			ERR_FAIL_MSG("Another resource is loaded from path '" + p_path + "' (possible cyclic resource inclusion).");
-		}
-	}
+		if (existing.is_valid()) {
+			if (p_take_over) {
+				existing->path_cache = String();
+				ResourceCache::resources.erase(p_path);
+			} else {
+				ERR_FAIL_MSG("Another resource is loaded from path '" + p_path + "' (possible cyclic resource inclusion).");
+			}
+		}
 
-	path_cache = p_path;
+		path_cache = p_path;
 
-	if (!path_cache.is_empty()) {
-		ResourceCache::resources[path_cache] = this;
-	}
-	ResourceCache::lock.unlock();
+		if (!path_cache.is_empty()) {
+			ResourceCache::resources[path_cache] = this;
+		}
+	}
 
 	_resource_path_changed();
 }
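In set_path() the critical section must end before _resource_path_changed() runs, so the guard is wrapped in an explicit block: the MutexLock's destructor fires at the block's closing brace rather than at the end of the function. A sketch of the idiom, under the same stand-in assumptions as above:

	// Bounding a guard with an explicit block so work after the critical
	// section (here, a notification hook) runs with the mutex released.
	#include <iostream>
	#include <mutex>
	#include <string>

	static std::mutex cache_mutex;
	static std::string path_cache;

	void notify_path_changed(const std::string &p_path) { // Must not run with cache_mutex held.
		std::cout << "path changed to " << p_path << "\n";
	}

	void set_path(const std::string &p_path) {
		{
			std::lock_guard<std::mutex> lock(cache_mutex);
			path_cache = p_path;
		} // Guard destroyed here; mutex released before the callback runs.
		notify_path_changed(p_path);
	}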
@@ -492,15 +492,13 @@ void Resource::set_as_translation_remapped(bool p_remapped) {
 		return;
 	}
 
-	ResourceCache::lock.lock();
+	MutexLock lock(ResourceCache::lock);
 
 	if (p_remapped) {
 		ResourceLoader::remapped_list.add(&remapped_list);
 	} else {
 		ResourceLoader::remapped_list.remove(&remapped_list);
 	}
-
-	ResourceCache::lock.unlock();
 }
 
 #ifdef TOOLS_ENABLED
@@ -573,14 +571,13 @@ Resource::~Resource() {
 		return;
 	}
 
-	ResourceCache::lock.lock();
+	MutexLock lock(ResourceCache::lock);
 	// Only unregister from the cache if this is the actual resource listed there.
 	// (Other resources can have the same value in `path_cache` if loaded with `CACHE_IGNORE`.)
 	HashMap<String, Resource *>::Iterator E = ResourceCache::resources.find(path_cache);
 	if (likely(E && E->value == this)) {
 		ResourceCache::resources.remove(E);
 	}
-	ResourceCache::lock.unlock();
 }
 
 HashMap<String, Resource *> ResourceCache::resources;
@@ -609,18 +606,20 @@ void ResourceCache::clear() {
 }
 
 bool ResourceCache::has(const String &p_path) {
-	lock.lock();
+	Resource **res = nullptr;
 
-	Resource **res = resources.getptr(p_path);
+	{
+		MutexLock mutex_lock(lock);
 
-	if (res && (*res)->get_reference_count() == 0) {
-		// This resource is in the process of being deleted, ignore its existence.
-		(*res)->path_cache = String();
-		resources.erase(p_path);
-		res = nullptr;
-	}
+		res = resources.getptr(p_path);
 
-	lock.unlock();
+		if (res && (*res)->get_reference_count() == 0) {
+			// This resource is in the process of being deleted, ignore its existence.
+			(*res)->path_cache = String();
+			resources.erase(p_path);
+			res = nullptr;
+		}
+	}
 
 	if (!res) {
 		return false;
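ResourceCache::has() now declares res outside the locked block and assigns it inside, so the lock covers the lookup and the stale-entry cleanup while the final null check runs after release. A sketch of that shape, with standard-library stand-ins and illustrative names:

	// Compute under the lock, consume the boolean outcome after release.
	#include <mutex>
	#include <string>
	#include <unordered_map>

	static std::mutex cache_lock;
	static std::unordered_map<std::string, int> resources;

	bool has(const std::string &p_path) {
		bool found = false;
		{
			std::lock_guard<std::mutex> mutex_lock(cache_lock);
			found = resources.count(p_path) != 0; // Lookup happens locked.
		}
		return found; // A plain bool is safe to use after the lock is gone.
	}

Note that the real code keeps the raw pointer only for a null check after release; dereferencing it there would not be safe.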
@@ -631,28 +630,27 @@ bool ResourceCache::has(const String &p_path) {
 
 Ref<Resource> ResourceCache::get_ref(const String &p_path) {
 	Ref<Resource> ref;
-	lock.lock();
-
-	Resource **res = resources.getptr(p_path);
+	{
+		MutexLock mutex_lock(lock);
+		Resource **res = resources.getptr(p_path);
 
-	if (res) {
-		ref = Ref<Resource>(*res);
-	}
+		if (res) {
+			ref = Ref<Resource>(*res);
+		}
 
-	if (res && !ref.is_valid()) {
-		// This resource is in the process of being deleted, ignore its existence
-		(*res)->path_cache = String();
-		resources.erase(p_path);
-		res = nullptr;
+		if (res && !ref.is_valid()) {
+			// This resource is in the process of being deleted, ignore its existence
+			(*res)->path_cache = String();
+			resources.erase(p_path);
+			res = nullptr;
+		}
 	}
 
-	lock.unlock();
-
 	return ref;
 }
 
 void ResourceCache::get_cached_resources(List<Ref<Resource>> *p_resources) {
-	lock.lock();
+	MutexLock mutex_lock(lock);
 
 	LocalVector<String> to_remove;
 
@@ -672,14 +670,9 @@ void ResourceCache::get_cached_resources(List<Ref<Resource>> *p_resources) {
 	for (const String &E : to_remove) {
 		resources.erase(E);
 	}
-
-	lock.unlock();
 }
 
 int ResourceCache::get_cached_resource_count() {
-	lock.lock();
-	int rc = resources.size();
-	lock.unlock();
-
-	return rc;
+	MutexLock mutex_lock(lock);
+	return resources.size();
 }
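The get_cached_resource_count() rewrite shows another convenience of a guard: returning straight from the locked scope is safe, because the return value is computed before the guard's destructor releases the mutex, which eliminates the temporary-plus-unlock dance. A minimal sketch under the same stand-in assumptions:

	// Returning while a guard is held: the value is read first, the
	// destructor unlocks afterwards, so no temporary variable is needed.
	#include <mutex>
	#include <string>
	#include <unordered_map>

	static std::mutex cache_lock;
	static std::unordered_map<std::string, int> resources;

	int get_cached_resource_count() {
		std::lock_guard<std::mutex> mutex_lock(cache_lock);
		return static_cast<int>(resources.size());
	}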
11 changes: 3 additions & 8 deletions core/object/object.cpp
@@ -1904,7 +1904,7 @@ void Object::set_instance_binding(void *p_token, void *p_binding, const GDExtensionInstanceBindingCallbacks *p_callbacks) {
 
 void *Object::get_instance_binding(void *p_token, const GDExtensionInstanceBindingCallbacks *p_callbacks) {
 	void *binding = nullptr;
-	_instance_binding_mutex.lock();
+	MutexLock instance_binding_lock(_instance_binding_mutex);
 	for (uint32_t i = 0; i < _instance_binding_count; i++) {
 		if (_instance_bindings[i].token == p_token) {
 			binding = _instance_bindings[i].binding;
@@ -1935,29 +1935,25 @@ void *Object::get_instance_binding(void *p_token, const GDExtensionInstanceBindingCallbacks *p_callbacks) {
 		_instance_binding_count++;
 	}
 
-	_instance_binding_mutex.unlock();
-
 	return binding;
 }
 
 bool Object::has_instance_binding(void *p_token) {
 	bool found = false;
-	_instance_binding_mutex.lock();
+	MutexLock instance_binding_lock(_instance_binding_mutex);
 	for (uint32_t i = 0; i < _instance_binding_count; i++) {
 		if (_instance_bindings[i].token == p_token) {
 			found = true;
 			break;
 		}
 	}
 
-	_instance_binding_mutex.unlock();
-
 	return found;
 }
 
 void Object::free_instance_binding(void *p_token) {
 	bool found = false;
-	_instance_binding_mutex.lock();
+	MutexLock instance_binding_lock(_instance_binding_mutex);
 	for (uint32_t i = 0; i < _instance_binding_count; i++) {
 		if (!found && _instance_bindings[i].token == p_token) {
 			if (_instance_bindings[i].free_callback) {
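These binding functions all scan _instance_bindings under the same mutex; with the guard declared at function scope, both the break out of the search loop and every return path release the lock with no explicit unlocks. A hypothetical standalone scan in the same shape, with std::lock_guard standing in for MutexLock:

	// A function-scope guard covers the whole scan, including the break exit.
	#include <cstdint>
	#include <mutex>

	static std::mutex binding_mutex;

	struct Binding {
		void *token = nullptr;
		void *binding = nullptr;
	};
	static Binding bindings[8];
	static uint32_t binding_count = 0;

	bool has_binding(void *p_token) {
		bool found = false;
		std::lock_guard<std::mutex> guard(binding_mutex);
		for (uint32_t i = 0; i < binding_count; i++) {
			if (bindings[i].token == p_token) {
				found = true;
				break; // Exits the loop; the guard still releases at function exit.
			}
		}
		return found;
	}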
@@ -1976,7 +1972,6 @@ void Object::free_instance_binding(void *p_token) {
 	if (found) {
 		_instance_binding_count--;
 	}
-	_instance_binding_mutex.unlock();
 }
 
 #ifdef TOOLS_ENABLED
3 changes: 1 addition & 2 deletions core/object/object.h
@@ -680,15 +680,14 @@ class Object {
 	_FORCE_INLINE_ bool _instance_binding_reference(bool p_reference) {
 		bool can_die = true;
 		if (_instance_bindings) {
-			_instance_binding_mutex.lock();
+			MutexLock instance_binding_lock(_instance_binding_mutex);
 			for (uint32_t i = 0; i < _instance_binding_count; i++) {
 				if (_instance_bindings[i].reference_callback) {
 					if (!_instance_bindings[i].reference_callback(_instance_bindings[i].token, _instance_bindings[i].binding, p_reference)) {
 						can_die = false;
 					}
 				}
 			}
-			_instance_binding_mutex.unlock();
 		}
 		return can_die;
 	}
37 changes: 10 additions & 27 deletions core/object/worker_thread_pool.cpp
@@ -127,9 +127,8 @@ void WorkerThreadPool::_process_task(Task *p_task) {
 
 	if (finished_users == max_users) {
 		// Get rid of the group, because nobody else is using it.
-		task_mutex.lock();
+		MutexLock task_lock(task_mutex);
 		group_allocator.free(p_task->group);
-		task_mutex.unlock();
 	}
 
 	// For groups, tasks get rid of themselves.
@@ -349,17 +348,13 @@ WorkerThreadPool::TaskID WorkerThreadPool::add_task(const Callable &p_action, bo
 }
 
 bool WorkerThreadPool::is_task_completed(TaskID p_task_id) const {
-	task_mutex.lock();
+	MutexLock task_lock(task_mutex);
 	const Task *const *taskp = tasks.getptr(p_task_id);
 	if (!taskp) {
-		task_mutex.unlock();
 		ERR_FAIL_V_MSG(false, "Invalid Task ID"); // Invalid task
 	}
 
-	bool completed = (*taskp)->completed;
-	task_mutex.unlock();
-
-	return completed;
+	return (*taskp)->completed;
 }
 
 Error WorkerThreadPool::wait_for_task_completion(TaskID p_task_id) {
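The WorkerThreadPool getters previously had to unlock before every ERR_FAIL_*_MSG, because those macros return from the function immediately; a missed unlock on any error path would leave task_mutex held forever. With a guard, the early returns need no special handling. A sketch with the macro modeled as a plain early return (the real macros also log an error), again using standard-library stand-ins:

	// Early returns under a guard: no per-path unlock bookkeeping.
	#include <mutex>
	#include <unordered_map>

	static std::mutex task_mutex;
	static std::unordered_map<int, bool> tasks; // Task ID -> completed flag.

	bool is_task_completed(int p_task_id) {
		std::lock_guard<std::mutex> task_lock(task_mutex);
		auto it = tasks.find(p_task_id);
		if (it == tasks.end()) {
			return false; // Early return on "Invalid Task ID": guard still unlocks.
		}
		return it->second;
	}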
@@ -522,10 +517,9 @@ void WorkerThreadPool::yield() {
 }
 
 void WorkerThreadPool::notify_yield_over(TaskID p_task_id) {
-	task_mutex.lock();
+	MutexLock task_lock(task_mutex);
 	Task **taskp = tasks.getptr(p_task_id);
 	if (!taskp) {
-		task_mutex.unlock();
 		ERR_FAIL_MSG("Invalid Task ID.");
 	}
 	Task *task = *taskp;
@@ -534,16 +528,13 @@ void WorkerThreadPool::notify_yield_over(TaskID p_task_id) {
 			// This avoids a race condition where a task is created and yield-over called before it's processed.
 			task->pending_notify_yield_over = true;
 		}
-		task_mutex.unlock();
 		return;
 	}
 
 	ThreadData &td = threads[task->pool_thread_index];
 	td.yield_is_over = true;
 	td.signaled = true;
 	td.cond_var.notify_one();
-
-	task_mutex.unlock();
 }
 
 WorkerThreadPool::GroupID WorkerThreadPool::_add_group_task(const Callable &p_callable, void (*p_func)(void *, uint32_t), void *p_userdata, BaseTemplateUserdata *p_template_userdata, int p_elements, int p_tasks, bool p_high_priority, const String &p_description) {
@@ -601,26 +592,20 @@ WorkerThreadPool::GroupID WorkerThreadPool::add_group_task(const Callable &p_act
 }
 
 uint32_t WorkerThreadPool::get_group_processed_element_count(GroupID p_group) const {
-	task_mutex.lock();
+	MutexLock task_lock(task_mutex);
 	const Group *const *groupp = groups.getptr(p_group);
 	if (!groupp) {
-		task_mutex.unlock();
 		ERR_FAIL_V_MSG(0, "Invalid Group ID");
 	}
-	uint32_t elements = (*groupp)->completed_index.get();
-	task_mutex.unlock();
-	return elements;
+	return (*groupp)->completed_index.get();
 }
 bool WorkerThreadPool::is_group_task_completed(GroupID p_group) const {
-	task_mutex.lock();
+	MutexLock task_lock(task_mutex);
 	const Group *const *groupp = groups.getptr(p_group);
 	if (!groupp) {
-		task_mutex.unlock();
 		ERR_FAIL_V_MSG(false, "Invalid Group ID");
 	}
-	bool completed = (*groupp)->completed.is_set();
-	task_mutex.unlock();
-	return completed;
+	return (*groupp)->completed.is_set();
 }
 
 void WorkerThreadPool::wait_for_group_task_completion(GroupID p_group) {
@@ -644,15 +629,13 @@ void WorkerThreadPool::wait_for_group_task_completion(GroupID p_group) {
 
 		if (finished_users == max_users) {
 			// All tasks using this group are gone (finished before the group), so clear the group too.
-			task_mutex.lock();
+			MutexLock task_lock(task_mutex);
 			group_allocator.free(group);
-			task_mutex.unlock();
 		}
 	}
 
-	task_mutex.lock(); // This mutex is needed when Physics 2D and/or 3D is selected to run on a separate thread.
+	MutexLock task_lock(task_mutex); // This mutex is needed when Physics 2D and/or 3D is selected to run on a separate thread.
 	groups.erase(p_group);
-	task_mutex.unlock();
 #endif
 }
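wait_for_group_task_completion() ends up with two MutexLock task_lock declarations; they sit in different scopes (one inside the if block, one at function level), so the code compiles and the mutex is released between the two critical sections. A sketch of that shape under the same stand-in assumptions:

	// Two sequential critical sections, each with its own scoped guard.
	#include <mutex>
	#include <set>

	static std::mutex task_mutex;
	static std::set<int> groups;

	void finish_group(int p_group, bool last_user) {
		if (last_user) {
			std::lock_guard<std::mutex> task_lock(task_mutex); // First section.
			// ... free per-group bookkeeping ...
		} // Released at the end of the if block.

		std::lock_guard<std::mutex> task_lock(task_mutex); // Reacquired for the erase.
		groups.erase(p_group);
	}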

[Diffs for the remaining 21 changed files not shown.]
