Skip to content
Draft
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
171 commits
Select commit Hold shift + click to select a range
9ec13f9
Add classifier training support
runwangdl Mar 17, 2025
f1a0491
Fix L3 DMA and Maxpool Bugs
runwangdl Mar 3, 2025
29baf2c
WIP Static Memory Allocation of IOs
Victor-Jung Mar 3, 2025
25be229
Temporary fix broken float softmax
Victor-Jung Mar 4, 2025
da56cbe
Fix lifetime of aliased input buffers
Victor-Jung Mar 4, 2025
721f747
Fix output buffer lifetime
Victor-Jung Mar 4, 2025
78685e5
Linting
Victor-Jung Mar 4, 2025
02b5435
WIP fix output buffer lifetime
Victor-Jung Mar 4, 2025
a2d67a0
Change RQHardswish dim due to compiler bug
Victor-Jung Mar 4, 2025
bdd92de
Fix typo
Victor-Jung Mar 4, 2025
20b1f8b
Fix duplicated IO in memory allocation visualization
Victor-Jung Mar 4, 2025
c708069
Fix the Constant Tensor offset to not take into account IO since they…
Victor-Jung Mar 4, 2025
b6e2448
Add new attribute to Variable and Transient buffer to annotate if the…
Victor-Jung Mar 7, 2025
7e96f18
Adapt calculateLifetime to use buffer I/O annotation
Victor-Jung Mar 7, 2025
b923520
Fix typo
Victor-Jung Mar 7, 2025
f4cb9e0
Remove IO buffer name and refactor var name
Victor-Jung Mar 13, 2025
435cc9d
Linting
Victor-Jung Mar 13, 2025
731f39f
Test the correctness of the memory map after memory allocation
Victor-Jung Mar 17, 2025
dd1370c
Allocate memory arena first
Victor-Jung Mar 17, 2025
8bfdb13
correct DMA length of copy assertion
runwangdl Mar 18, 2025
f01eb7f
Align memory allocation test
Victor-Jung Mar 18, 2025
031dc79
delete redundant shell scripts
runwangdl Mar 19, 2025
58e18da
Merge branch 'devel' into PULPCCTL3_16_16_64
runwangdl Mar 19, 2025
ac2d879
Update node with multioutput to single output
runwangdl Mar 19, 2025
6a7198b
add softmaxcrossentropygrad tiling
runwangdl Mar 19, 2025
360aef7
Add softmaxcrossentropylossgrad tiling
runwangdl Mar 20, 2025
bc48582
Merge branch 'PULPCCTL3_16_16_64' into GEMM_training_tiled
runwangdl Mar 20, 2025
b6542ba
Fix CI issue
runwangdl Mar 20, 2025
fe208d0
Fix CI bugs
runwangdl Mar 20, 2025
4a21359
update CI
runwangdl Mar 20, 2025
a0dcb6d
Improve memory alloc visualization
Victor-Jung Mar 20, 2025
91f12f0
Add and pass test for CCT gemmtraining 1_16_16_8 to 128
runwangdl Mar 20, 2025
d1e1ebf
update CI with 8-128 dim CCT last gemm training test
runwangdl Mar 20, 2025
86a2e99
Add SGD support for PULP Open
runwangdl Mar 20, 2025
bdacd2f
Update CCT training test with sgd
runwangdl Mar 20, 2025
b5421cc
Multi-level profiling + Linting
Victor-Jung Mar 21, 2025
99035f0
Update Changelog
runwangdl Mar 23, 2025
62e87d3
Merge branch 'devel' into GEMM_training_tiled
runwangdl Mar 23, 2025
15ea3ec
Solved issues caused by merging conflicts
runwangdl Mar 23, 2025
a644fdf
Solved Review Comments
runwangdl Mar 28, 2025
643e160
Resolving conflicts
runwangdl Mar 28, 2025
80a9518
Reresolve the conflict
runwangdl Mar 28, 2025
501775d
Solving CI issues
runwangdl Mar 28, 2025
65a56b7
fix linting errors
runwangdl Mar 28, 2025
03c3f4a
gelu sigmoid approximation
runwangdl Mar 24, 2025
7e141fd
gelu parallel + unroll
runwangdl Mar 24, 2025
c3ee783
Float Matmul Parallel on M
runwangdl Mar 24, 2025
47d8c19
Softmax Parallel and Softmax Op Support
runwangdl Mar 24, 2025
ccba380
conv parallel without im2col
runwangdl Mar 25, 2025
fafcedf
PULP Layernorm Parallel
runwangdl Mar 25, 2025
147e68f
Fixed CI issues
runwangdl Mar 28, 2025
6e07dc9
fixing linting
runwangdl Mar 28, 2025
8b2f685
Merge branch 'devel' into devel_CCT_Optim
runwangdl Apr 8, 2025
9c0b8f6
Enlarge CI floatconv tiling L1 size for 8 core and delete CCT 128 tes…
runwangdl Apr 8, 2025
4c36de2
matmul 1*4 unrolling
runwangdl Apr 24, 2025
28ec2ca
Add computeOp support for CCT necessary kernels
runwangdl Apr 24, 2025
bf1f8ae
Add openlibm expf
runwangdl Apr 13, 2025
deac9ce
add relu, mul, maxpool ops num
runwangdl May 4, 2025
3b12187
Optimize parallel for multiple kernels
runwangdl May 4, 2025
49da947
Merge branch 'devel' into devel_CCT_Optim
runwangdl May 4, 2025
47961b9
Merge branch 'devel' into devel_CCT_Optim
runwangdl May 6, 2025
8907532
Change ConvTileConstraint to only tile on outchannel
runwangdl May 6, 2025
133f9ae
Fix error in gelu
runwangdl May 6, 2025
f25127d
Fix Linting Issues
runwangdl May 6, 2025
6f3f585
Merge branch 'devel' into devel_CCT_Optim
runwangdl May 8, 2025
4ffea9b
Change CI tests
runwangdl May 8, 2025
81c3460
profilling string change to const static
runwangdl May 8, 2025
4af69de
Fix profiling dual loop issue
runwangdl May 8, 2025
e819626
Add RV32IMF Picolibc support for Siracusa platform
runwangdl May 8, 2025
fa0cc37
Build Docker for new gvsoc for testing
runwangdl May 8, 2025
ac56ca2
Gvsoc Small test
runwangdl May 8, 2025
fd6c99d
Add Redmule Platform, Engline, Tiler, and Deployer
runwangdl May 8, 2025
2862f29
Add rv32imf.txt to build docker
runwangdl May 8, 2025
9ef9cc2
Update GVSOC hash
runwangdl May 9, 2025
10de9f6
matmul delicate constraints for Redmule
runwangdl May 9, 2025
efab54c
Merge branch 'devel_CCT_Optim' into redmule_platform
runwangdl May 9, 2025
37670e6
conv with redmule
runwangdl May 9, 2025
08b7e23
Add CCT 32 test
runwangdl May 9, 2025
e42b3d6
xtensor gvsoc docker build
runwangdl May 9, 2025
823d847
add softmaxgrad tileconstraint
runwangdl May 10, 2025
212ff3c
LayernormGrad and CCT MLP Training Graph
runwangdl May 11, 2025
d7346a5
Merge branch 'devel' into exp/heterogeneous-memory-placement
runwangdl May 12, 2025
c51694b
Fix Layernormgrad
runwangdl May 12, 2025
3efa661
Add Gelugrad
runwangdl May 16, 2025
aee7651
Merge branch 'exp/heterogeneous-memory-placement' into AttentionTraining
runwangdl May 16, 2025
b40cbd7
GEMM with Redmule
runwangdl May 18, 2025
203f095
Efficient GEMM
runwangdl May 18, 2025
7835c5a
reducesum tileconstraint
runwangdl Jun 9, 2025
21294bb
temporary deactivate transposesplit otherwise kq training failed
runwangdl Jun 9, 2025
90689e2
merge devel
runwangdl Jun 13, 2025
5c3f287
gemm no bias + input in name issue for codegenerate
runwangdl Jun 19, 2025
3271c3a
Parallelization and Optimization of CCT Inference and Training Kernel…
runwangdl Jun 12, 2025
7f99f2c
Adapation for Merging Devel
runwangdl Jun 20, 2025
2b46d2d
AttentionTraining Support
runwangdl Nov 16, 2025
bc3f951
clean unwanted deeplotest
runwangdl Nov 16, 2025
fe13842
Merge branch 'devel' into AttentionTraining
runwangdl Nov 16, 2025
d867f73
Remove Redmule Content from this branch
runwangdl Nov 16, 2025
a7d6903
Fix Bugs after merge
runwangdl Nov 16, 2025
0806442
Update CCT training testcases
runwangdl Nov 25, 2025
df6e698
Add CCT2 Training to CI
runwangdl Nov 25, 2025
c28300a
Fix CI errors
runwangdl Nov 25, 2025
06fa447
Remove redundant files
runwangdl Nov 25, 2025
3b2af3f
Decrease unnecessary changes compared with devel
runwangdl Nov 26, 2025
caa751a
Fix transposesplit samenaming issue & update GEMM no bias for tiling
runwangdl Nov 26, 2025
179262e
Fixing Linting
runwangdl Nov 26, 2025
0e16453
Remove redundant changes
runwangdl Nov 26, 2025
3f5a042
Fix linting again
runwangdl Nov 26, 2025
5502d04
Merge latest devel including TinyViT tiling support
runwangdl Nov 26, 2025
d7517e2
Add GlobalAveragePool for MI_BMInet
runwangdl Nov 26, 2025
21777bd
[CNNTrain] Add averagepool
runwangdl Dec 7, 2025
04b81ac
[CNNTrain] Add AveragepoolGrad
runwangdl Dec 7, 2025
8e3bbe7
[CNNTrain] Add ConGradX
runwangdl Dec 7, 2025
26b1e1b
[CNNTraining] Stash
runwangdl Dec 8, 2025
ce5491e
[CNNTraining] ConvGradX, W,B and DW
runwangdl Dec 11, 2025
5f6813a
[CNNTraining] Convdw gradw
runwangdl Dec 11, 2025
b387b14
[CNNTraining] ReluGrad
runwangdl Dec 12, 2025
9f3e712
Link PULPTrainlib
runwangdl Dec 19, 2025
625a388
CNN-Training: CONVGradX,W,DWConvGradX,w
runwangdl Dec 20, 2025
7e2c33d
decouple convgrad with conv
runwangdl Jan 4, 2026
b0017f9
convgradw untiled nonim2col
runwangdl Jan 4, 2026
ed46217
convgradw im2col untiled
runwangdl Jan 4, 2026
78f320c
convgradw im2col tiling but need manual memset to change output data …
runwangdl Jan 4, 2026
13ba9d8
dwconvgradwhw tile and remove convbias
runwangdl Jan 5, 2026
c328687
pwconvgradw untiled and tiled
runwangdl Jan 5, 2026
313952a
Convgradx im2col tiling
runwangdl Jan 6, 2026
5b91672
dwconvgradx tiling pass
runwangdl Jan 6, 2026
0b7e165
refactor parser and template, pwconvgradx untiled
runwangdl Jan 6, 2026
8a300b4
refactor convgrad tileconstraints
runwangdl Jan 6, 2026
ee3fb9c
add convgrad layer
runwangdl Jan 8, 2026
02e8bb5
change relugrad naming to formal grad
runwangdl Jan 8, 2026
2d3f56d
sgd relu some changes
runwangdl Jan 8, 2026
025b39f
zero initialization of pwconv
runwangdl Jan 8, 2026
342a162
zero initialization of convgrad
runwangdl Jan 8, 2026
55f14ef
[CNN Training] Some missing pieces
runwangdl Jan 12, 2026
0ad72a6
change pulptrainlib to my personal repo
runwangdl Jan 12, 2026
4a44bd1
Add GroupNormalization and its gradient
runwangdl Jan 23, 2026
027d206
groupnormgradB
runwangdl Jan 25, 2026
0faead0
Fix forktransformer bug for convgradx
runwangdl Jan 25, 2026
7460074
WCCI 4 e2e training graph
runwangdl Jan 25, 2026
f0f7f70
fix groupnormgradxstat tileconstraint surpass l1 limit bug
runwangdl Jan 26, 2026
49cddd2
Deeploy Microbenchmark with GVSoC CSR and Demo on GEMM
runwangdl Feb 12, 2026
b260e4e
Add float concat and Change padding pattern of ConV
runwangdl Feb 13, 2026
9803232
Merge remote-tracking branch 'upstream/devel' into sleepvit
runwangdl Feb 15, 2026
c38a72a
Initial Training platform
runwangdl Feb 25, 2026
9e5957b
Updated training update with gradient accumulation and optimizer update
runwangdl Feb 26, 2026
0c4cfd7
Add MLP_Train Test
runwangdl Feb 26, 2026
78bd0df
Merge branch 'sleepvit' into TrainingPlatform
runwangdl Feb 26, 2026
36d145d
Temporal Changes for Multi-Ouput Kernels to fit the new testtraining …
runwangdl Mar 2, 2026
a89c533
Add Small Conv+Transformer Test for training untiled platform
runwangdl Mar 2, 2026
9428468
Avoid generation redundant memory copy for the same input during mult…
runwangdl Mar 2, 2026
b0e4fc2
Merge branch 'CNNTraining' into TrainingPlatform
runwangdl Mar 2, 2026
78e567c
Fix missing GroupNorm imports in PULPOpen/Bindings.py after CNNTraini…
runwangdl Mar 2, 2026
f5c8d00
Fix merge: restore CNNTraining additions dropped during conflict reso…
runwangdl Mar 3, 2026
41512e9
Fix additional merge conflicts: AveragePool bindings and duplicate de…
runwangdl Mar 3, 2026
b4a90a0
Fix duplicate LayerNormGradParser: keep 5-input TrainingPlatform version
runwangdl Mar 3, 2026
a495d3e
Wrong Free of aliased_input
runwangdl Mar 3, 2026
bf837e3
RISCV-SUMMIT Demo
runwangdl Mar 3, 2026
c2f14b2
LATEST DEMO for RISCV SUBMIT
runwangdl Mar 3, 2026
40179c2
Add training pytest
runwangdl Mar 4, 2026
b0b9c10
Pass tiled "python deeployTrainingRunner_tiled_siracusa.py \
runwangdl Mar 4, 2026
71c36ac
Pass "deeployTrainingRunner_tiled_siracusa.py -t Tests/Models/SmallTr…
runwangdl Mar 4, 2026
50ab6ff
Temporary change for sleepvit BP and Add tile traning pytest
runwangdl Mar 4, 2026
fee90a2
Refactoring training operators
runwangdl Mar 10, 2026
022b087
Add MaxPoolGrad operator support for PULPOpen platform
runwangdl Mar 11, 2026
528a8b1
Update grad kernels
runwangdl Mar 11, 2026
4d297bb
Update Conv Bias for Train Platform
runwangdl Mar 11, 2026
7b04a97
Update pulp-trainlib submodule: ConvGrad padding/stride support
runwangdl Mar 12, 2026
8f194d2
generateTrainingNetwork: auto-infer n_accum from inputs.npz
runwangdl Mar 12, 2026
a0188d0
Add batchnormgrad, globalaveragepoolgrad
runwangdl Mar 12, 2026
5ab0fe4
Add MSELoss/Grad op, ConvGradB tiling, MaxPoolGrad ORT fix, autoencod…
runwangdl Mar 12, 2026
0229dfa
Transferring TrainingPlatform to Gap9
runwangdl Mar 16, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
The table of contents is too big for display.
Diff view
Diff view
  •  
  •  
  •  
3 changes: 3 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
Expand Up @@ -11,3 +11,6 @@
[submodule "CMSIS-NN"]
path = TargetLibraries/CMSIS/third_party/CMSIS-NN
url = https://github.com/ARM-software/CMSIS-NN.git
[submodule "pulp-trainlib"]
path = TargetLibraries/PULPOpen/third_party/pulp-trainlib
url = https://github.com/runwangdl/pulp-trainlib.git
Original file line number Diff line number Diff line change
Expand Up @@ -229,7 +229,7 @@ def _NCHWtoNHWC_fun(graph: gs.Graph, match: Match, name: str, default_channels_f

if node.op in ["RequantizedConv", "Conv"]:
spatialDims = len(node.inputs[1].shape) - 2
elif node.op == "MaxPool":
elif node.op in ["MaxPool", "AveragePool", "AveragePoolGrad"]:
spatialDims = len(node.attrs["kernel_shape"])
elif node.op == "Pad":
spatialDims = 2 # Hack based on current status
Expand All @@ -245,7 +245,12 @@ def _NCHWtoNHWC_fun(graph: gs.Graph, match: Match, name: str, default_channels_f
if node.op in ["Conv", "RequantizedConv"]:
# In the case of Conv: [weights, opt. bias], RequantizedConv: [weights, mul, add, opt. shift]
for tensor in node.inputs[1:]:
_transformLayoutConst(tensor, spatialDims, default_channels_first)
if isinstance(tensor, gs.Constant):
_transformLayoutConst(tensor, spatialDims, default_channels_first)
elif isinstance(tensor, gs.Variable) and tensor.shape is not None and len(tensor.shape) >= 2:
# Trainable weight (Variable input in training graph) — insert Transpose node
perm = _transformLayoutPermutation(len(tensor.shape), spatialDims, default_channels_first)
graph.nodes.append(_appendTranspose(tensor, node, perm))

node.attrs["channels_first"] = default_channels_first

Expand All @@ -261,6 +266,24 @@ def __init__(self, default_channels_first: bool = True):
super().__init__(graph, partial(_NCHWtoNHWC_fun, default_channels_first = default_channels_first), name)


@contextagnostic
class NCHWtoNHWCAveragePoolPass(ReplaceSequentialPatternPass):
    """Graph pass that rewrites single AveragePool nodes via the
    NCHW-to-NHWC layout-transformation function.

    Matches one AveragePool node at a time and delegates the actual
    layout rewrite to ``_NCHWtoNHWC_fun``.
    """

    def __init__(self, default_channels_first: bool = True):
        # Bind the target layout into the replacement function and register
        # the single-node match pattern with the base sequential-pattern pass.
        super().__init__(_singleNodePattern(op = "AveragePool"),
                         partial(_NCHWtoNHWC_fun, default_channels_first = default_channels_first),
                         "_NCHW_TO_NHWC_AVERAGEPOOL_PASS")


@contextagnostic
class NCHWtoNHWCAveragePoolGradPass(ReplaceSequentialPatternPass):
    """Graph pass that rewrites single AveragePoolGrad nodes via the
    NCHW-to-NHWC layout-transformation function.

    Matches one AveragePoolGrad node at a time and delegates the actual
    layout rewrite to ``_NCHWtoNHWC_fun``.
    """

    def __init__(self, default_channels_first: bool = True):
        # Bind the target layout into the replacement function and register
        # the single-node match pattern with the base sequential-pattern pass.
        super().__init__(_singleNodePattern(op = "AveragePoolGrad"),
                         partial(_NCHWtoNHWC_fun, default_channels_first = default_channels_first),
                         "_NCHW_TO_NHWC_AVERAGEPOOLGRAD_PASS")


@contextagnostic
class NCHWtoNHWCConvPass(ReplaceSequentialPatternPass):

Expand Down Expand Up @@ -363,6 +386,8 @@ def __init__(self, default_channels_first: bool = True):
passes = [
NCHWtoNHWCPadPass(default_channels_first),
NCHWtoNHWCMaxPoolPass(default_channels_first),
NCHWtoNHWCAveragePoolPass(default_channels_first),
NCHWtoNHWCAveragePoolGradPass(default_channels_first),
NCHWtoNHWCDwConvPass(default_channels_first),
NCHWtoNHWCConvPass(default_channels_first),
]
Expand All @@ -376,6 +401,8 @@ def __init__(self, default_channels_first: bool = True):
passes = [
NCHWtoNHWCPadPass(default_channels_first),
NCHWtoNHWCMaxPoolPass(default_channels_first),
NCHWtoNHWCAveragePoolPass(default_channels_first),
NCHWtoNHWCAveragePoolGradPass(default_channels_first),
PULPNCHWtoNHWCDwConvPass(default_channels_first),
NCHWtoNHWCConvPass(default_channels_first),
]
Expand Down Expand Up @@ -533,8 +560,10 @@ def _remove_only_singleton_reduce_mean(graph: gs.Graph, match: Match, name: str)
# Delete node if only reduction over singleton dimensions
if 'axis' in node.attrs:
axis = node.attrs['axis']
else:
elif len(node.inputs) > 1 and node.inputs[1] is not None and hasattr(node.inputs[1], 'values') and node.inputs[1].values is not None:
axis = node.inputs[1].values
else:
return graph # axis unknown, skip

# Check if shape information is available
if node.inputs[0].shape is not None and all(node.inputs[0].shape[ax] == 1 for ax in axis):
Expand Down
24 changes: 22 additions & 2 deletions Deeploy/DeeployTypes.py
Original file line number Diff line number Diff line change
Expand Up @@ -343,7 +343,7 @@ def has_live_aliases(self, ctxt: NetworkContext) -> bool:
next = queue.pop()
buffNext = ctxt.lookup(next)
assert isinstance(buffNext, VariableBuffer)
live |= buffNext._live
live |= buffNext._live or (next in ctxt.globalObjects)
visited.add(next)
queue |= buffNext.aliases - visited
return live
Expand Down Expand Up @@ -1308,6 +1308,8 @@ def typeCheckNodeInputs(self, ctxt: NetworkContext, node: gs.Node) -> bool:
reference._type = _type
reference._instance = _type(inputNode.name, ctxt)
else:
if not hasattr(reference, '_type'):
return False
retCheck &= reference._type.referencedType == _type.referencedType
return retCheck

Expand Down Expand Up @@ -1339,7 +1341,9 @@ def annotateDict(self, ctxt: NetworkContext, node: gs.Node, operatorRepresentati
for key, value in operatorRepresentation.items():
# check if the referenced buffer is in the environment
if isinstance(value, str) and value in env:
self.typeDict[key + '_type'] = ctxt.lookup(value)._type
buf = ctxt.lookup(value)
if hasattr(buf, '_type'):
self.typeDict[key + '_type'] = buf._type

def typeCheck(self, ctxt: NetworkContext, node: gs.Node,
operatorRepresentation: OperatorRepresentation) -> Tuple[NetworkContext, bool]:
Expand Down Expand Up @@ -2086,11 +2090,15 @@ def bind(self, ctxt: NetworkContext) -> Tuple[NetworkContext, bool]:
# Update shapes and types of tensors in onnx graph based on type inference after binding
for node in (self.node.inputs + self.node.outputs):
if ctxt.is_local(node.name):
if not hasattr(ctxt.localObjects[node.name], '_type'):
continue # skip untyped buffers (e.g. ReduceSum axes, MaxPool mask)
node.shape = ctxt.localObjects[node.name].shape
npType = self._broadcastToNpType(ctxt.localObjects[node.name]._type)
if npType is not None:
node.dtype = npType
elif ctxt.is_global(node.name):
if not hasattr(ctxt.globalObjects[node.name], '_type'):
continue # skip untyped global buffers
npType = self._broadcastToNpType(ctxt.globalObjects[node.name]._type)
if isinstance(ctxt.globalObjects[node.name], ConstantBuffer):
if isinstance(node, gs.Constant):
Expand Down Expand Up @@ -2840,6 +2848,12 @@ def generateInferenceInitializationCode(self) -> str:
if isinstance(node, StructBuffer):
continue

# Skip local buffers that were registered but never typed (e.g. optional ONNX
# outputs like the MaxPool indices/mask tensor). These are not referenced by any
# template and must not be emitted as C declarations.
if not hasattr(node, '_type'):
continue

name = node.name
node.name = self.ctxt._mangle(node.name)
callStack += node.init()
Expand Down Expand Up @@ -2940,6 +2954,8 @@ def generateBufferInitializationCode(self) -> str:
callStack = ''
for node in ctxt.globalObjects.values():
if isinstance(node, VariableBuffer) and not isinstance(node, StructBuffer):
if not hasattr(node, '_type'):
continue # skip untyped buffers (e.g. ReduceSum axes constants)
assert issubclass(node._type, Pointer), f"Global VariableBuffer {node.name} is not a Pointer!"
if node._deploy:
name = node.name
Expand Down Expand Up @@ -2985,6 +3001,8 @@ def generateBufferAllocationCode(self) -> str:

for node in ctxt.globalObjects.values():
if isinstance(node, VariableBuffer) and not isinstance(node, StructBuffer):
if not hasattr(node, '_type'):
continue # skip untyped buffers (e.g. ReduceSum axes constants)
assert issubclass(node._type, Pointer), f"Global VariableBuffer {node.name} is not a Pointer!"
if node._deploy:
name = node.name
Expand Down Expand Up @@ -3522,6 +3540,8 @@ def _printMemorySummary(self):
if isinstance(_buffer, ConstantBuffer) or (isinstance(_buffer, VariableBuffer) and _buffer._deploy):
# SCHEREMO: We only
if (hasattr(_buffer, "_memoryLevel") and _buffer._memoryLevel == level) or level == "None":
if not hasattr(_buffer, '_type'):
continue # skip untyped buffers (e.g. ReduceSum axes constants)
staticSize += int((np.prod(_buffer.shape) * _buffer._type.referencedType.typeWidth // 8))
else:
log.warning(f"Buffer {_buffer.name} does not have a valid memory level")
Expand Down
Loading