-- SparseBlockTemporalMaxPooling.lua
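-- Applies temporal max pooling (THNN.TemporalMaxPooling) independently to each
-- column block of a sparse-block input of the form
--   { nBatchSize = N, taData = { { teValue = <tensor>, teRowIdx = <tensor> }, ... } }.
-- Only teValue is pooled; each block keeps its original teRowIdx.
-- kW is the pooling kernel width, dW the step (defaults to kW), and isRelax,
-- when true, clamps kW to the block's temporal length so blocks shorter than
-- kW can still be pooled.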
local SparseBlockTemporalMaxPooling, parent = torch.class('nn.SparseBlockTemporalMaxPooling', 'nn.Module')
function SparseBlockTemporalMaxPooling:__init(kW, dW, isRelax)
   dW = dW or kW
   self.kW = kW
   self.dW = dW
   self.isRelax = isRelax or false
end
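-- Lazily builds the sparse-block output table: one entry per column, reusing
-- the input's teRowIdx and allocating an empty teValue tensor for THNN to fill.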
function SparseBlockTemporalMaxPooling:pri_ensureOutput(input)
   if self.output ~= nil then
      return
   end

   self.output = { nBatchSize = input.nBatchSize, taData = {} }
   local nColumns = table.getn(input.taData)
   for i=1, nColumns do
      local taInputCurr = input.taData[i]
      local taOutputCurr = { teValue = torch.Tensor(),
                             teRowIdx = taInputCurr.teRowIdx }
      table.insert(self.output.taData, taOutputCurr)
   end
end
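-- Lazily allocates one indices tensor per column; THNN records the argmax
-- positions there so updateGradInput can route gradients back.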
function SparseBlockTemporalMaxPooling:pri_ensureTaIndices(input)
   if self.taIndices ~= nil then
      return
   end

   self.taIndices = {}
   local nColumns = table.getn(input.taData)
   for i=1, nColumns do
      -- Important Note: due to a bug in torch+nn, this uses torch.Tensor()
      -- instead of torch.LongTensor(). For this to work, use
      -- torch @ commit 1e5a315d03c91286d859512574d3b0b25e12d512 and
      -- nn @ commit 1443cd7c2becd793b3d954144dcf4a1bf9947771.
      table.insert(self.taIndices, torch.Tensor())
   end
end
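-- Forward pass for a single column block. With isRelax, the kernel width is
-- clamped to the block's temporal length (input:size(2)).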
function SparseBlockTemporalMaxPooling:pri_updateOutput_column(taInput, taOutput, teIndices)
   local input = taInput.teValue
   local output = taOutput.teValue

   local kW = self.kW
   if self.isRelax then
      kW = math.min(input:size(2), kW)
   end

   input.THNN.TemporalMaxPooling_updateOutput(
      input:cdata(), output:cdata(),
      teIndices:cdata(), kW, self.dW
   )
end
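-- Forward pass: pools every column block independently and returns the
-- sparse-block output table.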
function SparseBlockTemporalMaxPooling:updateOutput(input)
   self:pri_ensureTaIndices(input)
   self:pri_ensureOutput(input)

   local nColumns = table.getn(self.output.taData)
   for i=1, nColumns do
      self:pri_updateOutput_column(input.taData[i],
                                   self.output.taData[i],
                                   self.taIndices[i])
   end

   return self.output
end
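-- Lazily builds the sparse-block gradInput table, mirroring the input's structure.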
function SparseBlockTemporalMaxPooling:pri_ensureGradInput(input)
   if self.gradInput ~= nil then
      return
   end

   self.gradInput = { nBatchSize = input.nBatchSize, taData = {} }
   local nColumns = table.getn(input.taData)
   for i=1, nColumns do
      local taInputCurr = input.taData[i]
      local taGradInputCurr = { teValue = torch.Tensor(),
                                teRowIdx = taInputCurr.teRowIdx }
      table.insert(self.gradInput.taData, taGradInputCurr)
   end
end
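-- Backward pass for a single column block, using the indices recorded during
-- the forward pass.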
function SparseBlockTemporalMaxPooling:pri_updateGradInput_column(taInput, taGradOutput, taGradInput, teIndices)
   local input = taInput.teValue
   local gradOutput = taGradOutput.teValue
   local gradInput = taGradInput.teValue

   local kW = self.kW
   if self.isRelax then
      kW = math.min(input:size(2), kW)
   end

   input.THNN.TemporalMaxPooling_updateGradInput(
      input:cdata(), gradOutput:cdata(),
      gradInput:cdata(), teIndices:cdata(),
      kW, self.dW
   )
end
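-- Backward pass: computes gradInput for every column block and returns the
-- sparse-block gradInput table.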
function SparseBlockTemporalMaxPooling:updateGradInput(input, gradOutput)
   self:pri_ensureGradInput(input)

   local nColumns = table.getn(self.gradInput.taData)
   for i=1, nColumns do
      self:pri_updateGradInput_column(input.taData[i],
                                      gradOutput.taData[i],
                                      self.gradInput.taData[i],
                                      self.taIndices[i])
   end

   return self.gradInput
end
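--[[
Minimal usage sketch (illustrative, not part of the module). It assumes the
sparse-block format implied by the code above, with each teValue a 3D tensor of
size (nRows x nFrames x frameSize) and teRowIdx mapping block rows back into
the batch; the concrete sizes below are made up.

local mp = nn.SparseBlockTemporalMaxPooling(2, 2, true)

local input = {
   nBatchSize = 8,
   taData = {
      { teValue = torch.rand(3, 6, 4), teRowIdx = torch.LongTensor{1, 4, 7} },
      { teValue = torch.rand(2, 5, 4), teRowIdx = torch.LongTensor{2, 8} },
   },
}

local output = mp:updateOutput(input)

-- gradOutput mirrors the output's block structure
local gradOutput = { nBatchSize = output.nBatchSize, taData = {} }
for i, taOutputCurr in ipairs(output.taData) do
   gradOutput.taData[i] = { teValue = taOutputCurr.teValue:clone():uniform(),
                            teRowIdx = taOutputCurr.teRowIdx }
end

local gradInput = mp:updateGradInput(input, gradOutput)
--]]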