forked from projectceladon/nn_gpu
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathdevice.cpp
More file actions
executable file
·130 lines (105 loc) · 3.05 KB
/
device.cpp
File metadata and controls
executable file
·130 lines (105 loc) · 3.05 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
#include <memory.h>
#include <string.h>
#include <algorithm>
#include <thread>
#include <vector>
#include <hidl/LegacySupport.h>
#include "device.h"
#include "prepare_model.h"
#include "executor_manager.h"
#include "ValidateHal.h"
NAME_SPACE_BEGIN
using namespace android::nn;
// V1_0 entry point for querying device capabilities.
//
// HIDL contract: a synchronous-callback method must invoke its callback
// exactly once before returning — the original stub returned without
// calling cb, leaving the framework-side caller waiting/aborting.
// This driver only implements the 1.1 path (see getCapabilities_1_1),
// so report a failure with empty capabilities here.
// NOTE(review): if genuine 1.0 support is wanted, convert the 1.1
// capabilities to the 1.0 struct here instead — confirm with maintainers.
Return<void> Device::getCapabilities(getCapabilities_cb cb)
{
    NN_GPU_ENTRY();
    cb(ErrorStatus::GENERAL_FAILURE, {});
    NN_GPU_EXIT();
    return Void();
}
// V1_1 entry point for querying device capabilities.
// Delegates to the executor layer for the actual numbers, then hands the
// result back through the synchronous HIDL callback with ErrorStatus::NONE.
Return<void> Device::getCapabilities_1_1(getCapabilities_1_1_cb cb)
{
    NN_GPU_ENTRY();
    Capabilities caps;
    ExecutorManager::getCapabilities(caps);
    cb(ErrorStatus::NONE, caps);
    NN_GPU_EXIT();
    return Void();
}
// V1_0 entry point for querying which operations in `model` this device
// can execute.
//
// HIDL contract: cb must be invoked exactly once — the original stub never
// called it. The 1.0 API is not implemented by this driver (only the 1.1
// variant is), so honestly report every operation as unsupported. The
// result vector must have one entry per operation in the model.
// NOTE(review): assumes the V1_0 model exposes an `operations` vector like
// the 1.1 model does — confirm against the HAL headers.
Return<void> Device::getSupportedOperations(const V10_Model& model,
                                            getSupportedOperations_cb cb)
{
    NN_GPU_ENTRY();
    std::vector<bool> supported(model.operations.size(), false);
    cb(ErrorStatus::NONE, supported);
    NN_GPU_EXIT();
    return Void();
}
// V1_1 entry point for querying which operations in `model` this device
// can execute. Invalid models are rejected up front; otherwise the
// per-operation support mask comes from the executor layer.
Return<void> Device::getSupportedOperations_1_1(const Model& model,
                                                getSupportedOperations_1_1_cb cb)
{
    NN_GPU_ENTRY();
    if (!validateModel(model))
    {
        // Still honor the HIDL contract: invoke cb exactly once.
        std::vector<bool> supported;
        cb(ErrorStatus::INVALID_ARGUMENT, supported);
        NN_GPU_EXIT();  // fix: this exit trace was missing on the early return
        return Void();
    }
    std::vector<bool> supported = ExecutorManager::getSupportedOperations(model);
    cb(ErrorStatus::NONE, supported);
    NN_GPU_EXIT();
    return Void();
}
// V1_0 entry point for model compilation.
//
// The 1.0 path is not implemented by this driver (only prepareModel_1_1 is).
// The framework waits on `callback`, so it must be notified exactly once
// even on failure; the original stub returned ErrorStatus::NONE without
// notifying, which both lies about success and leaves the caller hanging.
// NOTE(review): if 1.0 support is wanted, convert the model and delegate to
// the 1.1 path instead — confirm with maintainers.
Return<ErrorStatus> Device::prepareModel(const V10_Model& model,
                                         const sp<IPreparedModelCallback>& callback)
{
    NN_GPU_ENTRY();
    if (callback.get() != nullptr)
    {
        callback->notify(ErrorStatus::GENERAL_FAILURE, nullptr);
    }
    NN_GPU_EXIT();
    return ErrorStatus::GENERAL_FAILURE;
}
// V1_1 entry point for model compilation.
// Validates the inputs, builds and initializes a PreparedModel, and reports
// the outcome both through the async `callback` (which the framework waits
// on) and the synchronous return value — the two must agree.
Return<ErrorStatus> Device::prepareModel_1_1(const Model& model,
                                             ExecutionPreference preference,
                                             const sp<IPreparedModelCallback>& callback)
{
    NN_GPU_ENTRY();
    if (callback.get() == nullptr)
    {
        LOGE("invalid callback passed to prepareModel");
        NN_GPU_EXIT();  // fix: exit trace was missing on the error paths
        return ErrorStatus::INVALID_ARGUMENT;
    }
    if (!validateModel(model) || !validateExecutionPreference(preference))
    {
        callback->notify(ErrorStatus::INVALID_ARGUMENT, nullptr);
        NN_GPU_EXIT();
        return ErrorStatus::INVALID_ARGUMENT;
    }
    sp<PreparedModel> preparedModel = new PreparedModel(model);
    if (!preparedModel->initialize())
    {
        // fix: a driver-side initialization failure is not the caller's
        // fault — per NN HAL error conventions this is GENERAL_FAILURE,
        // not INVALID_ARGUMENT.
        callback->notify(ErrorStatus::GENERAL_FAILURE, nullptr);
        NN_GPU_EXIT();
        return ErrorStatus::GENERAL_FAILURE;
    }
    callback->notify(ErrorStatus::NONE, preparedModel);
    NN_GPU_EXIT();
    return ErrorStatus::NONE;
}
// Reports the device's availability to the framework.
// Once the service is running this driver is always ready to accept work.
Return<DeviceStatus> Device::getStatus()
{
    NN_GPU_CALL();
    const DeviceStatus status = DeviceStatus::AVAILABLE;
    return status;
}
// Service entry point: initializes per-process executor state, registers
// this device as a HIDL service under `mName`, and blocks serving requests.
// Returns 1 in every case — reaching any return means the service is down.
int Device::run()
{
    NN_GPU_CALL();
    if (!ExecutorManager::initPerProcess())
    {
        LOGE("Unable to do ExecutorManager::initPerProcess, service exited!");
        return 1;
    }
    android::hardware::configureRpcThreadpool(4, true);
    if (registerAsService(mName) != android::OK)
    {
        LOGE("Could not register service");
        // fix: undo the per-process init on this failure path so the
        // executor resources are not leaked after a failed registration.
        ExecutorManager::deinitPerProcess();
        return 1;
    }
    // joinRpcThreadpool() normally never returns; falling through means the
    // threadpool shut down unexpectedly, so tear down and report failure.
    android::hardware::joinRpcThreadpool();
    LOGE("Service exited!");
    ExecutorManager::deinitPerProcess();
    return 1;
}
NAME_SPACE_STOP