-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathkeyvalues.h
More file actions
107 lines (97 loc) · 2.97 KB
/
keyvalues.h
File metadata and controls
107 lines (97 loc) · 2.97 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
/**
* keyvalues.h
*
* Provides a function to load a "BuildManifest" from a .vdf file
* using Tyti's VDF parser. We store the results into a list of
* VPKKeyValues_t. This replicates the original "KeyValues" usage.
*/
#ifndef KEYVALUES_H
#define KEYVALUES_H
#include <atomic>
#include <condition_variable>
#include <functional>
#include <mutex>
#include <queue>
#include <string>
#include <thread>
#include <vector>
#include "packedstore.h"
// We can include Tyti's VDF parser. E.g. if you have "tyti_vdf_parser.h"
#include "tyti_vdf_parser.h"
// ------------------------------------------------------------------
// LoadKeyValuesManifest:
// Expects a top-level object "BuildManifest" with multiple children.
// Each child’s name = local filesystem path
// Contains fields:
// "preloadSize"
// "loadFlags"
// "textureFlags"
// "useCompression"
// "deDuplicate"
// ------------------------------------------------------------------
bool LoadKeyValuesManifest(const std::string& vdfPath, std::vector<VPKKeyValues_t>& outList);
// ------------------------------------------------------------------
// ThreadPool:
//   Fixed-size pool of worker threads draining a shared task queue.
//   - enqueue() adds a task and wakes one worker.
//   - wait() blocks until the queue is empty AND no task is running.
//   - The destructor signals shutdown and joins all workers; tasks
//     already queued are still executed before the workers exit.
//
//   Synchronization note: queueMutex guards the queue, the stop flag
//   and tasksInProgress. waitCondition is waited on with queueMutex
//   as well, so a worker can never complete a task between wait()'s
//   predicate evaluation and its block — this fixes a lost-wakeup
//   race that existed when wait() used a separate mutex.
// ------------------------------------------------------------------
class ThreadPool {
public:
    // Spawn `numThreads` worker threads that loop pulling tasks.
    ThreadPool(size_t numThreads)
        : stop(false), tasksInProgress(0)
    {
        for (size_t i = 0; i < numThreads; i++)
        {
            workers.emplace_back([this](){
                while (true)
                {
                    std::function<void()> task;
                    {
                        std::unique_lock<std::mutex> lock(queueMutex);
                        condition.wait(lock, [this](){ return stop || !tasks.empty(); });
                        // Only exit once shutdown is requested AND the
                        // queue is drained, so pending tasks still run.
                        if (stop && tasks.empty())
                            return;
                        task = std::move(tasks.front());
                        tasks.pop();
                        // Count the task as in-progress while still
                        // holding the lock, so wait() never observes
                        // "queue empty, nothing running" mid-handoff.
                        tasksInProgress++;
                    }
                    // Run the task outside the lock so other workers
                    // can dequeue concurrently. NOTE: an exception
                    // escaping `task` would terminate the process
                    // (std::thread semantics) — callers must not
                    // enqueue throwing tasks.
                    task();
                    {
                        // Decrement under the same mutex wait() uses for
                        // its predicate; notifying after unlock is safe.
                        std::lock_guard<std::mutex> lock(queueMutex);
                        tasksInProgress--;
                    }
                    waitCondition.notify_all();
                }
            });
        }
    }
    // Request shutdown and join every worker. Queued tasks still run.
    ~ThreadPool()
    {
        {
            std::unique_lock<std::mutex> lock(queueMutex);
            stop = true;
        }
        condition.notify_all();
        for (std::thread &worker : workers)
            worker.join();
    }
    // Enqueue a task into the pool. Must not be called after the
    // destructor has begun.
    void enqueue(std::function<void()> task)
    {
        {
            std::unique_lock<std::mutex> lock(queueMutex);
            tasks.push(std::move(task));
        }
        condition.notify_one();
    }
    // Block until all enqueued tasks have completed (queue empty and
    // no task executing).
    void wait()
    {
        std::unique_lock<std::mutex> lock(queueMutex);
        waitCondition.wait(lock, [this](){
            return tasks.empty() && tasksInProgress.load() == 0;
        });
    }
private:
    std::vector<std::thread> workers;            // joined in the destructor
    std::queue<std::function<void()>> tasks;     // pending tasks (guarded by queueMutex)
    std::mutex queueMutex;                       // guards tasks, stop, tasksInProgress
    std::condition_variable condition;           // signals workers: new task or shutdown
    bool stop;                                   // shutdown flag (guarded by queueMutex)
    std::condition_variable waitCondition;       // signals wait(): a task finished
    std::atomic<int> tasksInProgress;            // tasks dequeued but not yet finished
};
#endif // KEYVALUES_H