-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathpolySmoof.R
More file actions
92 lines (78 loc) · 3.34 KB
/
polySmoof.R
File metadata and controls
92 lines (78 loc) · 3.34 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
# Package dependencies -----------------------------------------------------
# All library() calls grouped at the top of the script (pracma was
# previously loaded mid-script, after source()).
library(polynom)
library(ParamHelpers)
library(checkmate)
library(pracma)        # meshgrid()
source("functions.R")  # generate*() objective functions, mse(), mae()

# Input data ----------------------------------------------------------------
# Regular 2-D grids over each objective function's domain.
grid_Ackley <- meshgrid(seq(-32, 32, by = 0.645))
grid_Aluffi <- meshgrid(seq(-10, 10, by = 0.202))
grid_Gold   <- meshgrid(seq(-2, 2, by = 0.0402))

# All pairings of the two sequences' elements (cartesian product of the
# grid axes), flattened into two-column (X1, X2) input matrices.
input_Ackley <- cbind(as.vector(grid_Ackley[[1]]), as.vector(grid_Ackley[[2]]))
input_Aluffi <- cbind(as.vector(grid_Aluffi[[1]]), as.vector(grid_Aluffi[[2]]))
input_Gold   <- cbind(as.vector(grid_Gold[[1]]), as.vector(grid_Gold[[2]]))
# SMOOF: Aluffi-Pentini ------------------------------------------------------

# TRAIN: sample the objective on the grid and fit a raw degree-6
# bivariate polynomial surrogate.
area <- input_Aluffi
y <- generateAluffiPentini(area)
train_data <- data.frame(X1 = area[, 1], X2 = area[, 2], Y = y)

# Regress on the Y column of train_data (not the global `y`), so the fit
# does not silently depend on workspace state.
model <- lm(Y ~ poly(X1, X2, degree = 6, raw = TRUE), data = train_data)

# Training-set errors. predict(..., interval =) returns a fit/lwr/upr
# matrix; use only the point prediction ("fit") so the error is not
# averaged over the confidence bounds as well.
# NOTE(review): mse()/mae() are assumed to take a residual vector
# (defined in functions.R) — confirm their signature there.
intervals <- predict(model, interval = 'confidence', level = 0.99)
MSEerr_Aluffi <- mse(intervals[, "fit"] - train_data$Y)
MAE_Aluffi <- mae(intervals[, "fit"] - train_data$Y)

# TEST: arguments/2 = 20000 uniformly random points in the same domain
# [-10, 10]^2 (40000 draws filled column-wise into a 2-column matrix).
set.seed(210)
arguments <- 40000
area2 <- matrix(data = runif(arguments, -10, 10), nrow = arguments / 2, ncol = 2)
y2 <- generateAluffiPentini(area2)
test_data <- data.frame(X1 = area2[, 1], X2 = area2[, 2], Y = y2)

# Test-set errors (point predictions only, as above).
intervals_T <- predict(model, newdata = test_data, interval = 'confidence', level = 0.99)
MSEerr_Aluffi_T <- mse(intervals_T[, "fit"] - test_data$Y)
MAE_Aluffi_T <- mae(intervals_T[, "fit"] - test_data$Y)
summary(model)
###############################################################################################################
# CEC2013: shifted & rotated Ackley -------------------------------------------

# TRAIN: sample the objective on the grid and fit a raw degree-10
# bivariate polynomial surrogate.
area <- input_Ackley
y <- generateShiftedAndRotatedAckley(area)
train_data <- data.frame(X1 = area[, 1], X2 = area[, 2], Y = y)

# Regress on the Y column of train_data (not the global `y`), so the fit
# does not silently depend on workspace state.
model <- lm(Y ~ poly(X1, X2, degree = 10, raw = TRUE), data = train_data)

# Training-set errors: use only the "fit" column of the fit/lwr/upr
# matrix returned by predict(), not the interval bounds.
# NOTE(review): mse()/mae() are assumed to take a residual vector
# (defined in functions.R) — confirm their signature there.
intervals <- predict(model, interval = 'confidence', level = 0.99)
MSEerr_Ackley <- mse(intervals[, "fit"] - train_data$Y)
MAE_Ackley <- mae(intervals[, "fit"] - train_data$Y)

# TEST: arguments/2 = 20000 uniformly random points in the same domain
# [-32, 32]^2.
set.seed(210)
arguments <- 40000
area2 <- matrix(data = runif(arguments, -32, 32), nrow = arguments / 2, ncol = 2)
y2 <- generateShiftedAndRotatedAckley(area2)
test_data <- data.frame(X1 = area2[, 1], X2 = area2[, 2], Y = y2)

# Test-set errors (point predictions only, as above).
intervals_T <- predict(model, newdata = test_data, interval = 'confidence', level = 0.99)
MSEerr_Ackley_T <- mse(intervals_T[, "fit"] - test_data$Y)
MAE_Ackley_T <- mae(intervals_T[, "fit"] - test_data$Y)
##########################################################################################
# GlobalOpt: Goldstein-Price --------------------------------------------------

# TRAIN: sample the objective on the grid and fit a raw degree-15
# bivariate polynomial surrogate.
area <- input_Gold
y <- generateGoldPrice(area)
train_data <- data.frame(X1 = area[, 1], X2 = area[, 2], Y = y)

# Regress on the Y column of train_data (not the global `y`), so the fit
# does not silently depend on workspace state.
model <- lm(Y ~ poly(X1, X2, degree = 15, raw = TRUE), data = train_data)

# Training-set errors: use only the "fit" column of the fit/lwr/upr
# matrix returned by predict(), not the interval bounds.
# NOTE(review): mse()/mae() are assumed to take a residual vector
# (defined in functions.R) — confirm their signature there.
intervals <- predict(model, interval = 'confidence', level = 0.99)
MSEerr_Gold <- mse(intervals[, "fit"] - train_data$Y)
MAE_Gold <- mae(intervals[, "fit"] - train_data$Y)

# TEST: arguments/2 = 20000 uniformly random points in the same domain
# [-2, 2]^2.
set.seed(210)
arguments <- 40000
area2 <- matrix(data = runif(arguments, -2, 2), nrow = arguments / 2, ncol = 2)
y2 <- generateGoldPrice(area2)
test_data <- data.frame(X1 = area2[, 1], X2 = area2[, 2], Y = y2)

# Test-set errors (point predictions only, as above).
intervals_T <- predict(model, newdata = test_data, interval = 'confidence', level = 0.99)
MSEerr_Gold_T <- mse(intervals_T[, "fit"] - test_data$Y)
MAE_Gold_T <- mae(intervals_T[, "fit"] - test_data$Y)
##################################################################################################