import math

import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.nn import init
from torch.nn.parameter import Parameter

import mixgemm
try:
    from EETQ import quant_weights, w8_a16_gemm
    memory_bound_eetq_linear = True
except ImportError:
    memory_bound_eetq_linear = False
# memory_bound_eetq_linear = False  # uncomment to force-disable the EETQ path
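# EETQ supplies weight-only INT8 (W8A16) GEMM kernels: quant_weights() packs an
# fp16 weight into int8 plus per-channel scales, and w8_a16_gemm() multiplies an
# fp16 activation against that packed weight. Small (memory-bound) batches take
# that path below; without EETQ they fall back to plain F.linear.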
def FindOutliers(Activation, sigma=None):
    # Return the (dim-1) feature indices whose absolute activation exceeds sigma.
    if sigma is None:
        sigma = 6
    tmp = torch.unique(torch.where(Activation.abs() > sigma)[1])
    return tmp.to(torch.int32)
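# A minimal sketch of the expected behaviour (illustration only):
#   x = torch.randn(4, 8, dtype=torch.float16)  # (tokens, features)
#   x[:, 3] = 100.0                             # plant an outlier feature
#   FindOutliers(x)                             # -> tensor([3], dtype=torch.int32)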
layer_id = 0  # global counter; tags each MixLinear_GEMM instance in creation order
class MixLinear_GEMM(nn.Module):
    def __init__(self, in_features, out_features, bias=True,
                 device=None):
        super().__init__()
        global layer_id
        layer_id += 1
        self.layer_id = layer_id
        dtype = torch.float16
        factory_kwargs = {'device': device, 'dtype': dtype, 'requires_grad': False}
        self.in_features = in_features
        self.out_features = out_features
        self.weight = Parameter(torch.empty((out_features, in_features), **factory_kwargs), requires_grad=False)
        self.q_weight = torch.empty((1, 1), dtype=torch.int8)
        if bias:
            self.bias = Parameter(torch.empty(out_features, **factory_kwargs))
        else:
            self.register_parameter('bias', None)
        self.reset_parameters()
        self.weight_cache = None
        self.ind = None
        self.init = False
        self.quanted = False
        self.n_outliers = 0
        self.cnt = 0
        self.output = torch.zeros((1), dtype=torch.float16)
        self.y1 = None
        self.reuse_output_because_of_zeros_input = False
        self.last_input = None
        self.cache_computed = False
        self.q_scale_col = None  # filled lazily on the first quantized forward
        self.input_scales = None
        self.reuse_scaling_factor = False
        self.scale_max = None
        self.scale_min = None
        self.doing_estimation = True
    def reset_parameters(self) -> None:
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in) if fan_in > 0 else 0
            init.uniform_(self.bias, -bound, bound)
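    # Note: this mirrors nn.Linear.reset_parameters(); kaiming_uniform_ with
    # a=sqrt(5) works out to U(-1/sqrt(fan_in), +1/sqrt(fan_in)) for the weight,
    # the same bound used for the bias.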
    def forward(self, input):
        """
        TODO list:
        (1) Write a kernel to accelerate the check
            `if (input[[1], 0:32, 0:8] - self.last_input).sum() == 0:`
            2025.1.17: DONE
            no longer required -- the locality algorithm covers it
        (2) Further analysis of the max-value pattern
            2025.1.8: DONE
            not needed -- it failed; there is no locality
        (3) INT8 GEMM kernel:
            todo 1: autotune with Triton: github.com/AlibabaPAI/FLASHNN/blob/main/flashnn/triton_kernels/gemm_a8w8.py
            todo 2: tune the CUTLASS parameters: add the GEMM shape to the params (see git log)
        (4) FP8 GEMM kernel:
            todo: test on an H100
            2025.1.8: DONE
            implemented in mylinearfp8.py;
            set debug = True in that file to get the profiling result
        (5) Optimize:
            input = input.contiguous()
            this only affects the scaling-factor computation and the quantization step,
            so those two kernels should gain support for non-contiguous input
        (6) Fuse the bias and scaling kernels together:
            2025.1.4: DONE!
            see /home/chenyidong/seperated_kernel/kernel/symmetric/epilogue/thread/linear_combination_dequant.h
        (7) Load sparse outliers from global memory to the register file instead of global to shared:
            in development, in /home/chenyidong/seperated_kernel/kernel/mixgemm.cu,
            __global__ void mma_sparse_A_dense_B_kernel
        """
        assert input.dtype == torch.float16
        if self.init is not True:
            if self.layer_id == 1:
                print("initializing weights; do not count this step during time estimation")
            if len(input.shape) == 3:
                M = input.shape[0] * input.shape[1]
                self.input_scales = torch.zeros((M, 1), dtype=torch.float32, device=input.device)
            self.init = True
            computed_bound = False
            self.find_zeros = torch.zeros((1,), dtype=torch.int32, pin_memory=True)
            self.reuse_output = torch.zeros((1,), dtype=torch.int32, pin_memory=True)
            self.last_input = torch.zeros((1, 32, 8), dtype=torch.float16, device=input.device)
            if len(input.shape) == 3 and input.shape[0] * input.shape[1] > 32:
                computed_bound = True
            if len(input.shape) == 2 and input.shape[0] > 32:
                computed_bound = True
            if not computed_bound:
                if memory_bound_eetq_linear:
                    # Memory-bound shape: quantize once with EETQ (W8A16) and free the fp16 weight.
                    int8_weight_cpu = torch.t(self.weight.data).contiguous().cpu()
                    int8_weight, scales = quant_weights(int8_weight_cpu, torch.int8, False)
                    self.eetq_weight = int8_weight.cuda()
                    self.eetq_scale_col = scales.half().cuda()
                    self.weight.data = self.weight.data.cpu()
                    del self.weight
            if computed_bound:
                # Compute-bound shape: quantize the weight to INT8 with per-output-channel scales.
                tmp = input.reshape(-1, input.shape[-1])
                local_ind = FindOutliers(tmp)
                self.n_outliers = len(local_ind)
                if self.n_outliers == 0:
                    self.weight.data = self.weight.data.cpu()
                    tmp = self.weight.data
                else:
                    # Keep the outlier columns in fp16 and zero them before quantizing.
                    self.weight_cache = self.weight.data[:, local_ind]
                    self.ind = local_ind
                    tmp = self.weight.cpu()
                    tmp[:, local_ind] = 0
                self.q_scale_col = (torch.max(torch.abs(tmp), dim=1)[0].unsqueeze(1) / 127).to(torch.float16).reshape((1, self.out_features))
                tmp /= self.q_scale_col.T
                tmp = torch.clamp(tmp, -128, 127)
                self.q_weight = tmp.round().to(torch.int8).cuda()
                self.q_scale_col = self.q_scale_col.cuda().reshape((self.out_features))
                # Pack the bias into the scale tensor.
                if self.bias is not None:
                    tmp = self.bias
                else:
                    tmp = torch.zeros((self.out_features), dtype=torch.float16, device=input.device)
                tmp = torch.cat([self.q_scale_col, tmp]).reshape((2, self.out_features))
                self.q_scale_col = tmp.t().contiguous().cuda()
                self.quanted = True
                self.weight.data = self.weight.data.cpu()
                # Delete the original weight:
                # del self.weight
                # self.weight = torch.zeros(1, dtype=torch.float16)
                del tmp
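        # Recap of the quantization above (with outlier columns zeroed):
        #   s_n      = max_k |W[n, k]| / 127
        #   Wq[n, k] = clamp(round(W[n, k] / s_n), -128, 127)   (int8)
        # so a downstream epilogue can dequantize roughly as
        #   y[m, n] ~= (Xq @ Wq.T)[m, n] * x_scale_m * s_n + bias_n,
        # with bias_n packed as the second row of q_scale_col and the fp16
        # outlier columns restored by the sparse path further down.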
        if self.quanted is False:
            self.cnt += 1
            if memory_bound_eetq_linear:
                y = w8_a16_gemm(input, self.eetq_weight, self.eetq_scale_col)
                if self.bias is not None:
                    y += self.bias
                return y
            return F.linear(input, self.weight, self.bias)
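        # Dispatch rationale (a sketch): for small batches (M <= 32) the GEMM is
        # memory-bandwidth-bound, so weight-only W8A16 is enough; for larger M the
        # compute-bound INT8 x INT8 path below with dynamic activation scales wins.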
        else:
            assert len(input.shape) == 3
            input_shape0 = input.shape[0]
            input_shape1 = input.shape[1]
            M = input.shape[0] * input.shape[1]
            K = self.in_features
            N = self.out_features
            # TODO: support non-contiguous input in the scaling/quantization kernels.
            if not input.is_contiguous():
                input = input.contiguous()
            if self.cnt >= 50:
                # Reset the locality cache every 50 steps.
                self.cnt = 0
                self.find_zeros[0] = 0
                self.cache_computed = False
            if self.cnt == 0 and input.shape[0] == 2:
                # Record whether input[0, 0, 0] == 0 and snapshot input[[1], 0:32, 0:8].
                mixgemm.find_zeros(self.find_zeros, input, input.shape[0], input.shape[1], K, self.last_input)
            if self.cnt == 1 and input.shape[0] == 2:
                if self.find_zeros[0] == 1:
                    # Reuse the output when the snapshot still matches, i.e. when
                    # (input[[1], 0:32, 0:8] - self.last_input).sum() == 0.
                    mixgemm.reuse_output(self.reuse_output, input, input.shape[0], input.shape[1], K, self.last_input)
                    if self.reuse_output[0] == 1:
                        self.reuse_output_because_of_zeros_input = True
            self.cnt += 1
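            # Assumed semantics of the two mixgemm locality kernels above
            # (a pure-PyTorch sketch, not the actual implementation):
            #   find_zeros:   find_zeros[0] = int(input[0, 0, 0] == 0);
            #                 last_input.copy_(input[[1], 0:32, 0:8])
            #   reuse_output: reuse_output[0] = int((input[[1], 0:32, 0:8]
            #                                        - last_input).sum() == 0)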
""""
during this analysis we found that the location of the max index keeps vary stable
so we should not compute the scaling factors at each steps?
maybe we could try it
2025.1.7
sorry
not work
"""
if self.reuse_output_because_of_zeros_input is True and self.cache_computed:
return self.y1
            if self.n_outliers == 0:
                if self.reuse_scaling_factor:
                    if self.cnt == 0:
                        # First step of the window: compute fresh input scales.
                        y1 = mixgemm.mixgemmforward_direct(M, N, K,
                                                           input,
                                                           self.input_scales,
                                                           self.q_weight,
                                                           self.q_scale_col,
                                                           input_shape0, input_shape1)
                    else:
                        # Later steps: reuse the cached input scales.
                        y1 = mixgemm.mixgemmforward_direct_with_scaling(M, N, K,
                                                                        input,
                                                                        self.input_scales,
                                                                        self.q_weight,
                                                                        self.q_scale_col,
                                                                        input_shape0,
                                                                        input_shape1)
                else:
                    # Record the max and min scales during estimation (disabled):
                    # if self.doing_estimation:
                    #     current_scale = torch.amax(input)
                    #     if self.scale_max is None:
                    #         self.scale_max = current_scale
                    #         self.scale_min = current_scale
                    #     self.scale_max = max(self.scale_max, current_scale)
                    #     self.scale_min = min(self.scale_min, current_scale)
                    #     if self.cnt == 49:
                    #         self.doing_estimation = False
                    #         if self.scale_max / self.scale_min < 1.5:
                    #             self.reuse_scaling_factor = True
                    y1 = mixgemm.mixgemmforward_direct(M, N, K,
                                                       input,
                                                       self.input_scales,
                                                       self.q_weight,
                                                       self.q_scale_col,
                                                       input_shape0, input_shape1)
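                # Assumed behaviour of mixgemmforward_direct (a sketch): it computes
                # per-row dynamic activation scales x_scale_m = max_k |x[m, k]| / 127,
                # writes them into self.input_scales, quantizes the activations, and
                # runs the INT8 GEMM; the *_with_scaling variant reuses the stored
                # scales instead of recomputing them.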
                debug = 0
                use_ops = 0
                if debug:
                    import mixlib
                    if use_ops:
                        scaleRow = torch.zeros((M, 1), dtype=torch.float32, device=input.device)
                        q_xcache = mixlib.FindRowScaleF32(input, scaleRow, M, K, 8)
                        self.q_scale_col = self.q_scale_col.cuda().to(torch.float32)
                    start_event = torch.cuda.Event(enable_timing=True)
                    end_event = torch.cuda.Event(enable_timing=True)
                    torch.cuda.synchronize()
                    start_event.record()
                    for i in range(10):
                        y1 = mixlib.mixgemmforward_direct(M, N, K,
                                                          input,
                                                          self.q_weight,
                                                          self.q_scale_col)
                        if use_ops:
                            from vllm import _custom_ops as ops
                            y1 = ops.cutlass_scaled_mm(
                                q_xcache,
                                self.q_weight.T,
                                out_dtype=torch.float16,
                                scale_a=scaleRow,
                                scale_b=self.q_scale_col,
                                bias=self.bias
                            )
                    end_event.record()
                    torch.cuda.synchronize()
                    ms4 = start_event.elapsed_time(end_event)
                    # fp16 baseline (assumes self.weight is still resident on the GPU).
                    start_event.record()
                    for i in range(10):
                        y1 = F.linear(input, self.weight, self.bias)
                    end_event.record()
                    torch.cuda.synchronize()
                    ms5 = start_event.elapsed_time(end_event)
                    print("int8 time = %.8f fp16 time = %.8f, %d %d %d" % (ms4, ms5, M, N, K))
            else:
                # Outlier path: dense INT8 GEMM plus a sparse fp16 GEMM over the
                # cached outlier columns.
                y1 = mixgemm.mixgemmforward_dynamic(M, N, K,
                                                    input,
                                                    self.q_weight,
                                                    self.q_scale_col,
                                                    input_shape0, input_shape1,
                                                    self.weight_cache,
                                                    self.ind, self.n_outliers)
                # Alternative kept for reference:
                # q_xcache, scaleRow, outliers = mixlib.FindRowScaleFusedExtracOutliersF32(input,
                #     self.ind, len(self.ind), M, K)
                # y1 = ops.cutlass_scaled_mm(
                #     q_xcache,
                #     self.q_weight.T,
                #     out_dtype=torch.float16,
                #     scale_a=scaleRow,
                #     scale_b=self.q_scale_col,
                #     bias=self.bias
                # ) + torch.mm(outliers, self.weight_cache.T)
            # Optimization: the bias is already packed into q_scale_col, so no
            # explicit add is needed here:
            # if self.bias is not None:
            #     y1 += self.bias
            # optimize 1: opt the output shape
            if self.reuse_output_because_of_zeros_input:
                self.cache_computed = True
                self.y1 = y1
            return y1
    def extra_repr(self) -> str:
        return f'in_features={self.in_features}, out_features={self.out_features}, bias={self.bias is not None}'
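

if __name__ == "__main__":
    # Minimal smoke test (a sketch; assumes a CUDA device and the compiled
    # mixgemm extension are importable). With M = 1 * 8 <= 32 this stays on the
    # memory-bound path, i.e. EETQ W8A16 if available, otherwise plain F.linear.
    layer = MixLinear_GEMM(128, 256, bias=True, device="cuda")
    x = torch.randn(1, 8, 128, dtype=torch.float16, device="cuda")
    y = layer(x)
    print(layer)     # extra_repr -> in_features=128, out_features=256, bias=True
    print(y.shape)   # -> torch.Size([1, 8, 256])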