Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
107 changes: 107 additions & 0 deletions 50-class-mapping.csv
Original file line number Diff line number Diff line change
@@ -0,0 +1,107 @@
,original,new,label
0,0,0,Unknown
1,2,1,Left-Cerebral-White-Matter
2,4,2,Left-Lateral-Ventricle
3,5,2,Left-Inf-Lat-Vent
4,7,3,Left-Cerebellum-White-Matter
5,8,4,Left-Cerebellum-Cortex
6,10,5,Left-Thalamus-Proper
7,11,6,Left-Caudate
8,12,7,Left-Putamen
9,13,8,Left-Pallidum
10,14,2,3rd-Ventricle
11,15,2,4th-Ventricle
12,16,9,Brain-Stem
13,17,10,Left-Hippocampus
14,18,11,Left-Amygdala
15,24,12,CSF
16,26,13,Left-Accumbens-area
17,28,14,Left-VentralDC
18,41,1,Right-Cerebral-White-Matter
19,43,2,Right-Lateral-Ventricle
20,44,2,Right-Inf-Lat-Vent
21,46,3,Right-Cerebellum-White-Matter
22,47,4,Right-Cerebellum-Cortex
23,49,5,Right-Thalamus-Proper
24,50,6,Right-Caudate
25,51,7,Right-Putamen
26,52,8,Right-Pallidum
27,53,10,Right-Hippocampus
28,54,11,Right-Amygdala
29,58,13,Right-Accumbens-area
30,60,14,Right-VentralDC
31,72,2,5th-Ventricle
32,192,15,Corpus_Callosum
33,251,15,CC_Posterior
34,252,15,CC_Mid_Posterior
35,253,15,CC_Central
36,254,15,CC_Mid_Anterior
37,255,15,CC_Anterior
38,1001,16,ctx-lh-bankssts
39,1002,17,ctx-lh-caudalanteriorcingulate
40,1003,18,ctx-lh-caudalmiddlefrontal
41,1005,19,ctx-lh-cuneus
42,1006,20,ctx-lh-entorhinal
43,1007,21,ctx-lh-fusiform
44,1008,22,ctx-lh-inferiorparietal
45,1009,23,ctx-lh-inferiortemporal
46,1010,24,ctx-lh-isthmuscingulate
47,1011,25,ctx-lh-lateraloccipital
48,1012,26,ctx-lh-lateralorbitofrontal
49,1013,27,ctx-lh-lingual
50,1014,28,ctx-lh-medialorbitofrontal
51,1015,29,ctx-lh-middletemporal
52,1016,30,ctx-lh-parahippocampal
53,1017,31,ctx-lh-paracentral
54,1018,32,ctx-lh-parsopercularis
55,1019,33,ctx-lh-parsorbitalis
56,1020,34,ctx-lh-parstriangularis
57,1021,35,ctx-lh-pericalcarine
58,1022,36,ctx-lh-postcentral
59,1023,37,ctx-lh-posteriorcingulate
60,1024,38,ctx-lh-precentral
61,1025,39,ctx-lh-precuneus
62,1026,40,ctx-lh-rostralanteriorcingulate
63,1027,41,ctx-lh-rostralmiddlefrontal
64,1028,42,ctx-lh-superiorfrontal
65,1029,43,ctx-lh-superiorparietal
66,1030,44,ctx-lh-superiortemporal
67,1031,45,ctx-lh-supramarginal
68,1032,46,ctx-lh-frontalpole
69,1033,47,ctx-lh-temporalpole
70,1034,48,ctx-lh-transversetemporal
71,1035,49,ctx-lh-insula
72,2001,16,ctx-rh-bankssts
73,2002,17,ctx-rh-caudalanteriorcingulate
74,2003,18,ctx-rh-caudalmiddlefrontal
75,2005,19,ctx-rh-cuneus
76,2006,20,ctx-rh-entorhinal
77,2007,21,ctx-rh-fusiform
78,2008,22,ctx-rh-inferiorparietal
79,2009,23,ctx-rh-inferiortemporal
80,2010,24,ctx-rh-isthmuscingulate
81,2011,25,ctx-rh-lateraloccipital
82,2012,26,ctx-rh-lateralorbitofrontal
83,2013,27,ctx-rh-lingual
84,2014,28,ctx-rh-medialorbitofrontal
85,2015,29,ctx-rh-middletemporal
86,2016,30,ctx-rh-parahippocampal
87,2017,31,ctx-rh-paracentral
88,2018,32,ctx-rh-parsopercularis
89,2019,33,ctx-rh-parsorbitalis
90,2020,34,ctx-rh-parstriangularis
91,2021,35,ctx-rh-pericalcarine
92,2022,36,ctx-rh-postcentral
93,2023,37,ctx-rh-posteriorcingulate
94,2024,38,ctx-rh-precentral
95,2025,39,ctx-rh-precuneus
96,2026,40,ctx-rh-rostralanteriorcingulate
97,2027,41,ctx-rh-rostralmiddlefrontal
98,2028,42,ctx-rh-superiorfrontal
99,2029,43,ctx-rh-superiorparietal
100,2030,44,ctx-rh-superiortemporal
101,2031,45,ctx-rh-supramarginal
102,2032,46,ctx-rh-frontalpole
103,2033,47,ctx-rh-temporalpole
104,2034,48,ctx-rh-transversetemporal
105,2035,49,ctx-rh-insula
20 changes: 20 additions & 0 deletions bayesian_utils.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,20 @@
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import Callback
import tensorflow_probability as tfp
import math

def normal_prior(mu, prior_std):
    """Build a prior-generating function for Bayesian (tfp) layers.

    Returns a callable with the signature tensorflow_probability layers
    expect for ``kernel_prior_fn`` / ``bias_prior_fn``.  The prior is an
    isotropic Normal centered at zero with standard deviation
    ``prior_std``, wrapped in ``Independent`` so the batch dimensions are
    treated as event dimensions.

    NOTE(review): ``mu`` is accepted but never used — the location is
    hard-coded to ``tf.zeros``; confirm whether a non-zero-mean prior was
    intended.
    """
    def prior_fn(dtype, shape, name, trainable, add_variable_fn):
        distributions = tfp.distributions
        loc = tf.zeros(shape, dtype)
        scale = dtype.as_numpy_dtype(prior_std)
        base = distributions.Normal(loc=loc, scale=scale)
        event_ndims = tf.size(input=base.batch_shape_tensor())
        return distributions.Independent(base, reinterpreted_batch_ndims=event_ndims)
    return prior_fn
66 changes: 66 additions & 0 deletions data_loader.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,66 @@
"""
Created on Tue Feb 23 18:34:40 2021

@author: aakanksha
"""
# Imports
import nobrainer
import tensorflow as tf
import sys
import json
import glob
import numpy as np
import pandas as pd
import os
import warnings
import nibabel as nib



def _to_blocks(x, y, block_shape):
    """Split feature volume `x` and label volume `y` into sub-blocks.

    Both volumes are partitioned with ``nobrainer.volume.to_blocks``, so
    the returned tensors gain a leading "blocks" dimension.

    Parameters
    ----------
    x, y : tf.Tensor
        Feature and label volumes with matching spatial shape.
    block_shape : tuple of int
        Shape of each extracted block.

    Returns
    -------
    tuple of tf.Tensor
        ``(x_blocks, y_blocks)``.
    """
    # Removed leftover debug print(x.shape); it executed on every trace.
    x = nobrainer.volume.to_blocks(x, block_shape)
    y = nobrainer.volume.to_blocks(y, block_shape)
    return (x, y)

def get_dict(n_classes):
    """Return a ``{freesurfer_label: contiguous_class_index}`` mapping.

    Loads the class-mapping CSV for the requested number of classes from
    the working directory and maps original FreeSurfer label values onto
    the compact ``0 .. n_classes-1`` range used for training.

    Parameters
    ----------
    n_classes : int
        Number of segmentation classes; only 50 and 115 are supported.

    Returns
    -------
    dict
        Mapping from original FreeSurfer label to new class index.

    Raises
    ------
    NotImplementedError
        If ``n_classes`` is neither 50 nor 115.
    """
    print('Conversion into {} segmentation classes from freesurfer labels to 0-{}'.format(n_classes, n_classes - 1))
    if n_classes == 50:
        # Read only the two integer columns; the text 'label' column is unused.
        tmp = pd.read_csv('50-class-mapping.csv', usecols=['original', 'new'], dtype=np.int32)
    elif n_classes == 115:
        tmp = pd.read_csv('115-class-mapping.csv', header=0, usecols=[0, 1], dtype=np.int32)
    else:
        # Raise an instance with a message instead of the bare class.
        raise NotImplementedError('n_classes must be 50 or 115, got {}'.format(n_classes))
    return dict(zip(tmp['original'], tmp['new']))

def process_dataset(dset, batch_size, block_shape, n_classes, train=True):
    """Prepare a volumetric ``tf.data`` pipeline for training or evaluation.

    Steps: standard-score the features, remap FreeSurfer labels to
    contiguous class indices, split volumes into blocks, shuffle
    (training only), add a grayscale channel, and batch.

    Parameters
    ----------
    dset : tf.data.Dataset
        Dataset yielding ``(features, labels)`` volume pairs.
    batch_size : int
        Number of blocks per batch; incomplete final batches are dropped.
    block_shape : tuple of int
        Shape of the sub-blocks each volume is split into.
    n_classes : int
        Number of segmentation classes (drives the label mapping).
    train : bool
        If True, shuffle the blocks.

    Returns
    -------
    tf.data.Dataset
    """
    # Build the label mapping once instead of inside the map lambda.
    mapping = get_dict(n_classes)
    # Standard-score the features and remap the labels.
    dset = dset.map(lambda x, y: (nobrainer.volume.standardize(x),
                                  nobrainer.volume.replace(y, mapping)))
    # Separate features into blocks.
    dset = dset.map(lambda x, y: _to_blocks(x, y, block_shape))
    # This step is necessary because separating into blocks adds a dimension.
    dset = dset.unbatch()
    # Only shuffle the dataset during training.
    if train:
        dset = dset.shuffle(buffer_size=100)
    # Add a grayscale channel to the features.
    dset = dset.map(lambda x, y: (tf.expand_dims(x, -1), y))
    # Batch features and labels.
    dset = dset.batch(batch_size, drop_remainder=True)
    return dset

def get_dataset(pattern, volume_shape, batch, block_shape, n_classes, train=True):
    """Build a ready-to-train dataset from TFRecord files.

    Expands ``pattern`` with glob, loads the matching (compressed)
    TFRecords as volumes via nobrainer, and runs the result through
    ``process_dataset`` (standardize, remap labels, block, shuffle if
    ``train``, channel, batch).

    Returns
    -------
    tf.data.Dataset
    """
    tfrecord_files = glob.glob(pattern)
    raw = nobrainer.dataset.tfrecord_dataset(
        file_pattern=tfrecord_files,
        volume_shape=volume_shape,
        shuffle=False,
        scalar_label=False,
        compressed=True,
    )
    return process_dataset(raw, batch, block_shape, n_classes, train=train)