-
Notifications
You must be signed in to change notification settings - Fork 4
Expand file tree
/
Copy pathemotion_sample.py
More file actions
90 lines (71 loc) · 3.06 KB
/
emotion_sample.py
File metadata and controls
90 lines (71 loc) · 3.06 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
import librosa
import soundfile
import os, glob, pickle
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neural_network import MLPClassifier
from sklearn.metrics import accuracy_score
# Extract features (mfcc, chroma, mel) from a sound file
# Core part, VVIP function for emotional detector
def extract_feature(file_name, mfcc, chroma, mel):
    """Extract a 1-D feature vector (MFCC, chroma, mel) from one audio file.

    Parameters
    ----------
    file_name : str
        Path to the audio file to read.
    mfcc : bool
        Include the 40-coefficient MFCC time-average.
    chroma : bool
        Include the chromagram time-average (computed from the STFT).
    mel : bool
        Include the mel-spectrogram time-average.

    Returns
    -------
    np.ndarray
        The requested feature families, each averaged over time frames and
        concatenated in the order mfcc, chroma, mel.
    """
    with soundfile.SoundFile(file_name) as sound_file:
        X = sound_file.read(dtype='float32')
        sample_rate = sound_file.samplerate
        if chroma:
            # STFT magnitude is only needed for the chroma feature; compute once.
            stft = np.abs(librosa.stft(X))
        result = np.array([])
        if mfcc:
            # Mean over time frames -> one value per coefficient.
            mfccs = np.mean(librosa.feature.mfcc(y=X, sr=sample_rate, n_mfcc=40).T, axis=0)
            result = np.hstack((result, mfccs))  # stacks arrays in sequence horizontally (in a columnar fashion)
        if chroma:
            chroma_feat = np.mean(librosa.feature.chroma_stft(S=stft, sr=sample_rate).T, axis=0)
            result = np.hstack((result, chroma_feat))
        if mel:
            # BUG FIX: librosa >= 0.10 requires keyword arguments here; the
            # original positional call melspectrogram(X, sr=...) raises TypeError.
            mel_feat = np.mean(librosa.feature.melspectrogram(y=X, sr=sample_rate).T, axis=0)
            result = np.hstack((result, mel_feat))
    return result
# Load the data and extract features for each sound file
def load_data(emotions, observed_emotions, test_size=0.2,
              data_glob='/home/deokgyu.ahn/practice/Resource/Code/emotion/duck_emotion/ravdess_data/Actor_*/*.wav'):
    """Load RAVDESS audio files, extract features, and split train/test.

    Parameters
    ----------
    emotions : dict
        Maps the RAVDESS emotion code (third '-'-separated field of the
        filename, e.g. '01') to an emotion label string.
    observed_emotions : collection of str
        Only files whose emotion label is in this collection are kept.
    test_size : float
        Fraction of samples held out for the test split (default 0.2).
    data_glob : str
        Glob pattern locating the .wav files. Parameterized so the loader is
        not tied to one machine; the default preserves the original path.

    Returns
    -------
    tuple
        (x_train, x_test, y_train, y_test) from train_test_split with a
        fixed random_state for reproducibility.
    """
    x, y = [], []
    for file in glob.glob(data_glob):
        file_name = os.path.basename(file)
        # RAVDESS encodes the emotion as the third hyphen-separated field.
        emotion = emotions[file_name.split("-")[2]]
        if emotion not in observed_emotions:
            continue
        feature = extract_feature(file, mfcc=True, chroma=True, mel=True)
        x.append(feature)
        y.append(emotion)
    # Fixed random_state keeps the split reproducible across runs.
    return train_test_split(np.array(x), y, test_size=test_size, random_state=9)
def main():
    """Train and evaluate an MLP emotion classifier on RAVDESS audio."""
    # Emotion codes used by the RAVDESS dataset filenames.
    emotions = {
        '01': 'neutral',
        '02': 'calm',
        '03': 'happy',
        '04': 'sad',
        '05': 'angry',
        '06': 'fearful',
        '07': 'disgust',
        '08': 'surprised',
    }
    # Restrict the task to a two-class problem.
    observed_emotions = ['neutral', 'angry']

    # Build the train/test split (25% held out).
    x_train, x_test, y_train, y_test = load_data(
        emotions, observed_emotions, test_size=0.25)

    # Report dataset sizes and feature dimensionality.
    print((x_train.shape[0], x_test.shape[0]))
    print(f'Features extracted: {x_train.shape[1]}')

    # A single-hidden-layer perceptron classifier.
    # Duck's note : If you want to increase the accuracy, make this network deeper
    # Rather than using crude MLPClassifier
    classifier = MLPClassifier(
        alpha=0.01,
        batch_size=256,
        epsilon=1e-08,
        hidden_layer_sizes=(300,),
        learning_rate='adaptive',
        max_iter=500,
    )

    # Fit on the training split, then score on the held-out split.
    classifier.fit(x_train, y_train)
    predictions = classifier.predict(x_test)
    accuracy = accuracy_score(y_true=y_test, y_pred=predictions)
    print("Accuracy: {:.2f}%".format(accuracy * 100))


if __name__ == "__main__":
    main()