diff --git a/.gitignore b/.gitignore
index fe13233..2a4d014 100644
--- a/.gitignore
+++ b/.gitignore
@@ -17,4 +17,7 @@ listing.tex
 research/
 tests/test_results
 tests/experiments
-experiments/
\ No newline at end of file
+experiments/
+benchmarks/conv_nn/cifar10_train_data
+benchmarks/conv_nn/cifar10_test_data
+benchmarks/conv_nn/init.pkl
\ No newline at end of file
diff --git a/benchmarks/__pycache__/conv_nn.cpython-311.pyc b/benchmarks/__pycache__/conv_nn.cpython-311.pyc
new file mode 100644
index 0000000..300553c
Binary files /dev/null and b/benchmarks/__pycache__/conv_nn.cpython-311.pyc differ
diff --git a/benchmarks/conv_nn.py b/benchmarks/conv_nn.py
deleted file mode 100644
index 378fdfc..0000000
--- a/benchmarks/conv_nn.py
+++ /dev/null
@@ -1,396 +0,0 @@
-import os
-import tempfile
-import pickle
-
-from waggon import functions as f
-
-import torch
-import torch.nn as nn
-import torch.optim as optim
-from torchvision import transforms
-from torchvision.datasets import CIFAR10
-from torch.utils.data import Dataset, DataLoader
-from torchview import draw_graph # For visualizing model architecture
-
-import numpy as np
-import matplotlib.pyplot as plt
-from tqdm import tqdm
-from scipy.spatial import distance
-from datetime import datetime
-
-
-def print_dict(d): print('\n'.join(f"{k:<{max(len(str(k)) for k in d)}} : {v}" for k, v in d.items()) if d else "empty")
-
-def objective_CIFAR_10(param, logging=False, ray_tune=False):
-    NUM_EPOCHS = 10
-    BATCH_SIZE = 64
-    DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-    transform = transforms.Compose([
-        transforms.ToTensor(),
-        transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.247, 0.243, 0.261])
-    ])
-
-    train_dataset = CIFAR10Dataset(
-        root_dir="extracted_data/cifar-10-batches-py",
-        train=True,
-        transform=transform
-    )
-
-    test_dataset = CIFAR10Dataset(
-        root_dir="extracted_data/cifar-10-batches-py",
-        train=False,
-        transform=transform
-    )
-
-    train_loader = DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
-    test_loader = DataLoader(test_dataset, batch_size=BATCH_SIZE, shuffle=False)
-
-    model = CIFAR10Model(
-        input_size=32,
-        input_channels=3,
-        conv1_out=param[0],
-        conv1_kernel=param[1],
-        pool1_kernel=param[2],
-        conv2_out=param[3],
-        pool2_kernel=param[4],
-        fc1_size=param[5],
-        num_classes=10
-    ).to(DEVICE)
-
-    criterion = nn.CrossEntropyLoss()
-    optimizer = optim.Adam(model.parameters(), lr=param[6])
-    best_metric = 1e-2
-
-
-    for epoch in range(NUM_EPOCHS):
-        model.train()
-        for images, labels in train_loader:
-            images, labels = images.to(DEVICE), labels.to(DEVICE)
-
-            outputs = model(images)
-            loss = criterion(outputs, labels)
-
-            optimizer.zero_grad()
-            loss.backward()
-            optimizer.step()
-        val_loss, accuracy = model.validate(test_loader, criterion, DEVICE)
-
-        if ray_tune:
-            with tempfile.TemporaryDirectory() as checkpoint_dir:
-                # (Path(checkpoint_dir) / "data.ckpt").write_text(str(step))
-                tune.report(
-                    {"iterations": step, "accuracy": score},
-                    checkpoint=tune.Checkpoint.from_directory(checkpoint_dir)
-                )
-
-        if accuracy > best_metric:
-            best_metric = accuracy
-        if logging:
-            print(f'Epoch {epoch+1} Validation Loss: {val_loss:.4f}, Accuracy: {accuracy:.2f}%')
-
-
-    # if self.minimise:
-    #     return -1.0 * best_metric
-    # else:
-    return best_metric
-
-class CIFAR10Model(nn.Module):
-    def __init__(
-        self,
-        input_size=32,
-        input_channels=3,
-        conv1_out=32,
-        conv1_kernel=3,
-        padding_1=1,
-        pool1_kernel=2,
-        pool1_stride=2,
-        conv2_out=64,
-        conv2_kernel=3,
-        padding_2=1,
-        pool2_kernel=2,
-        pool2_stride=2,
-
-        batch_size=64,
-        apply_avg_pooling=0,
-
-        fc1_size=512,
-        negative_slope=0.15,
-        dropout_size=0.1,
-        num_classes=10
-    ):
-        super().__init__()
-
-        self.convolutions = nn.Sequential(
-            nn.Conv2d(input_channels, conv1_out, kernel_size=conv1_kernel, padding=padding_1),
-            nn.LeakyReLU(negative_slope=negative_slope),
-            nn.BatchNorm2d(conv1_out),
-            nn.Dropout(dropout_size),
-            nn.MaxPool2d(pool1_kernel, stride=pool1_stride),
-            nn.Conv2d(conv1_out, conv2_out, kernel_size=conv2_kernel, padding=padding_2),
-            nn.LeakyReLU(negative_slope=negative_slope),
-            nn.BatchNorm2d(conv2_out)
-        )
-
-        self.avg_pooling = nn.AdaptiveAvgPool2d((1,1)) if apply_avg_pooling == 1 else nn.Identity()
-        self.flatten = nn.Flatten()
-
-        meta_tensor = torch.zeros(1, input_channels, input_size, input_size)
-        meta_tensor = self.convolutions(meta_tensor)
-        meta_tensor = self.avg_pooling(meta_tensor)
-        meta_tensor = self.flatten(meta_tensor)
-        out_features = meta_tensor.shape[1]
-
-        self.classifier = nn.Sequential(
-            nn.Linear(out_features, fc1_size),
-            nn.LeakyReLU(negative_slope=negative_slope),
-            nn.Linear(fc1_size, num_classes)
-        )
-
-    def forward(self, x=None):
-        x = self.convolutions(x)
-        # x = x.view(x.size(0), -1)
-
-        x = self.avg_pooling(x)
-        x = self.flatten(x)
-
-        x = self.classifier(x)
-        return x
-
-    def validate(self, val_loader, criterion, device):
-        self.eval()
-        val_loss = 0.0
-        correct = 0
-        total = 0
-        with torch.no_grad():
-            for inputs, labels in val_loader:
-                inputs, labels = inputs.to(device), labels.to(device)
-                outputs = self(inputs)
-                loss = criterion(outputs, labels)
-                val_loss += loss.item()
-                _, predicted = outputs.max(1)
-                total += labels.size(0)
-                correct += predicted.eq(labels).sum().item()
-        self.train()
-        return val_loss / len(val_loader), 100. * correct / total
-
-class ConvNN(f.Function):
-    def __init__(self, n_obs=1, model=1, minimise=True, logging=False, ray_tune=False, plot=False):
-        super(f.Function, self).__init__()
-
-        self.search_params = {
-            'conv_1_size': 32,
-            'conv_1_kernel': 5,
-            'padding_1': 4,
-            'maxpool_size': 2,
-            'conv2_size': 16,
-            'padding_2': 3,
-            'conv_2_kernel': 3,
-            'fc_layer': 125,
-            'learning_rate': 3e-4,
-            'batch_size': 64,
-            'negative_slope': 0.25,
-            'dropout_prob': 0.25,
-            'optimizer_Adam': 1,    # ['Adam' -> 1, 'SGD' -> 0]
-            'apply_avg_pooling': 1  # ['Apply' -> 1, 'use fc instead' -> 0]
-        }
-        self.domain_unscaled = [
-            [   1,  128], # conv_1_size
-            [   1,   18], # conv_1_kernel
-            [   0,   20], # padding_1
-            [   1,    3], # maxpool_size
-            [   1,  128], # conv2_size
-            [   0,   10], # padding_2
-            [   1,    5], # conv_2_kernel
-            [  10, 1024], # fc_layer
-            [1e-6, 1e-2], # learning_rate
-            [   4,  512], # batch_size
-            # [ 0.0, 0.99], # negative_slope
-            # [ 0.0,  0.5], # dropout_prob
-            # [ 0.0,  1.0], # optimizer_Adam
-            # [ 0.0,  1.0], # apply_avg_pooling
-        ]
-        self.dim = len(self.domain_unscaled)
-        self.domain = np.tile([0., 1.], reps=(self.dim,1))
-        self.plot = plot
-        self.name = 'classifier'
-        self.f = lambda x: self.__call__(x)
-        self.log_transform = False
-        self.log_eps = 1e-8
-        self.sigma = 1e-1
-        self.n_obs = n_obs
-        self.model = model
-        self.minimise = minimise
-        self.seed = 73
-        self.ray_tune = ray_tune
-        self.logging = logging
-        self.f_min = 0 # None
-        self.glob_min = np.zeros(self.dim).reshape(1, -1) # None
-
-    def __call__(self, params : np.array):
-        NUM_EPOCHS = 10
-        DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
-
-        transform = transforms.Compose([
-            transforms.ToTensor(),
-            transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.247, 0.243, 0.261])
-        ])
-
-        # ----- dataset creation -----
-        train_dataset = CIFAR10(
-            root='./cifar10_train_data',
-            train=True,
-            transform=transform,
-            download=True
-        )
-        test_dataset = CIFAR10(
-            root='./cifar10_test_data',
-            train=False,
-            transform=transform,
-            download=True
-        )
-
-        results = []
-        search_space = np.copy(params)
-        for param in search_space:
-            # scaling params to actual values
-            for domain_idx in range(len(self.domain)):
-                if type(self.domain_unscaled[domain_idx][1]) == int:
-                    # print(f'{param[domain_idx]} , {self.domain_unscaled[domain_idx][1]} and {self.domain_unscaled[domain_idx][0]}')
-                    param[domain_idx] = int(param[domain_idx] * (self.domain_unscaled[domain_idx][1] - self.domain_unscaled[domain_idx][0]) + self.domain_unscaled[domain_idx][0])
-                else:
-                    # print(f'{param[domain_idx]} , {self.domain_unscaled[domain_idx][1]} and {self.domain_unscaled[domain_idx][0]}')
-                    param[domain_idx] = param[domain_idx] * (self.domain_unscaled[domain_idx][1] - self.domain_unscaled[domain_idx][0]) + self.domain_unscaled[domain_idx][0]
-
-            config = {
-                'conv_1_size': int(param[0]),
-                'conv_1_kernel': int(param[1]),
-                'padding_1': int(param[2]),
-                'maxpool_size': int(param[3]),
-                'conv2_size': int(param[4]),
-                'padding_2': int(param[5]),
-                'conv_2_kernel': int(param[6]),
-                'fc_layer': int(param[7]),
-                'learning_rate': param[8],
-                'batch_size': int(param[9]),
-                # 'negative_slope': param[10],
-                # 'dropout_prob': param[11],
-                # 'optimizer_Adam': int(param[12] >= 0.5), # ['Adam' -> 1, 'SGD' -> 0],
-                # 'apply_avg_pooling': int(param[13] >= 0.5) # ['Apply polling' -> 1, 'Use FC' -> 0]
-            }
-
-            if self.logging:
-                print_dict(config)
-
-            train_loader = DataLoader(train_dataset, batch_size=config['batch_size'], shuffle=True)
-            test_loader = DataLoader(test_dataset, batch_size=config['batch_size'], shuffle=False)
-
-            model = CIFAR10Model(
-                input_size=32,
-                input_channels=3,
-
-                conv1_out=config['conv_1_size'],
-                conv1_kernel=config['conv_1_kernel'],
-                padding_1=config['padding_1'],
-
-                pool1_kernel=config['maxpool_size'],
-                batch_size=config['batch_size'],
-
-                conv2_out=config['conv2_size'],
-                conv2_kernel=config['conv_2_kernel'],
-                padding_2=config['padding_2'],
-
-                fc1_size=config['fc_layer'],
-                # negative_slope=config['negative_slope'],
-                # dropout_size=config['dropout_prob'],
-
-                # apply_avg_pooling=config['apply_avg_pooling'],
-
-                num_classes=10
-            ).to(DEVICE)
-
-            # ----- plot the model architecture -----
-            if self.plot:
-                timestamp = datetime.now().strftime("%d_%H_%M")
-                filename = f"cifar10_model_graph_{timestamp}.png"
-
-                model_graph = draw_graph(
-                    CIFAR10Model(),
-                    input_size=(1, 3, 32, 32),
-                    expand_nested=True,
-                    save_graph=True,
-                    filename=filename
-                )
-
-            # ----- model training -----
-            # if config['optimizer_Adam'] == 1:
-            #     optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])
-            # else:
-            #     optimizer = optim.SGD(model.parameters(), lr=config['learning_rate'])
-
-            criterion = nn.CrossEntropyLoss()
-            optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])
-            best_metric = 1e-2
-
-            for epoch in tqdm(range(NUM_EPOCHS), desc=f'Training'):
-                model.train()
-                for images, labels in train_loader:
-                    images, labels = images.to(DEVICE), labels.to(DEVICE)
-
-                    optimizer.zero_grad()
-                    outputs = model(images)
-                    loss = criterion(outputs, labels)
-                    loss.backward()
-                    optimizer.step()
-
-                val_loss, accuracy = model.validate(test_loader, criterion, device=DEVICE)
-                if accuracy > best_metric:
-                    best_metric = accuracy
-
-                if self.logging:
-                    print(f'Epoch {epoch+1} Validation Loss: {val_loss:.4f}, Accuracy: {accuracy:.2f}%')
-            results.append(best_metric)
-
-        results = np.array(results).reshape(-1, 1)
-
-        if self.minimise:
-            return -1.0 * results
-        else:
-            return results
-
-    def sample(self, vectors_of_params):
-        # vectors_of_params = vectors_of_params[0]
-
-        return vectors_of_params, self.__call__(vectors_of_params)
-
-
-        # X, y = None, None
-        # for n in vectors_of_params:
-        #     X_ = np.array(self.n_obs * [n])
-        #     n = torch.tensor(n)
-
-        #     x = torch.normal(torch.mean(n), torch.std(n), (self.n_obs, n.shape[-1]))
-        #     proba = self.model(x).detach().cpu().numpy()
-        #     y_ = self.f([n]) + proba
-
-        #     if X is None:
-        #         X, y = X_, y_
-        #     else:
-        #         X = np.concatenate((X, X_))
-        #         y = np.concatenate((y, y_))
-
-        # for n in vectors_of_params:
-        #     X_ = np.array(self.n_obs * [n])
-        #     n = torch.tensor(n)
-
-        #     x = torch.normal(torch.mean(n), torch.std(n), (self.n_obs, n.shape[-1]))
-        #     proba = self.model(x).detach().cpu().numpy()
-        #     y_ = self.f([n]) + proba
-
-        #     if X is None:
-        #         X, y = X_, y_
-        #     else:
-        #         X = np.concatenate((X, X_))
-        #         y = np.concatenate((y, y_))
-
-        # return X, y
-
diff --git a/benchmarks/conv_nn/__pycache__/conv_nn.cpython-311.pyc b/benchmarks/conv_nn/__pycache__/conv_nn.cpython-311.pyc
new file mode 100644
index 0000000..701d5cb
Binary files /dev/null and b/benchmarks/conv_nn/__pycache__/conv_nn.cpython-311.pyc differ
diff --git a/benchmarks/conv_nn/conv_nn.py b/benchmarks/conv_nn/conv_nn.py
new file mode 100644
index 0000000..32aafbf
--- /dev/null
+++ b/benchmarks/conv_nn/conv_nn.py
@@ -0,0 +1,302 @@
+from waggon import functions as f
+
+import torch
+import torch.nn as nn
+import torch.optim as optim
+from torchvision import transforms
+from torchvision.datasets import CIFAR10
+from torch.utils.data import DataLoader
+from torchview import draw_graph # For visualizing model architecture
+
+import numpy as np
+from tqdm import tqdm
+from scipy.stats import qmc
+from datetime import datetime
+
+
+class LeNet5(nn.Module):
+    def __init__(
+        self,
+        # input layer
+        input_size=32,
+        input_channels=3,
+
+        # C1
+        conv1_out=6,
+        conv1_kernel=5,
+
+        # S2
+        pool1_kernel=2,
+        pool1_stride=1,
+
+        # C3
+        conv2_out=16,
+        conv2_kernel=5,
+
+        # S4
+        pool2_kernel=2,
+        pool2_stride=2,
+
+        # C5
+        conv3_out=120,
+        conv3_kernel=5,
+
+        # F6
+        fc_out_size=84,
+
+        num_classes=10
+    ):
+        super().__init__()
+
+        # Convolution layers
+        self.convolutions = nn.Sequential(
+            nn.Conv2d(input_channels, conv1_out, kernel_size=conv1_kernel),
+            nn.ReLU(),
+            nn.AvgPool2d(pool1_kernel, stride=pool1_stride),
+
+            nn.Conv2d(conv1_out, conv2_out, kernel_size=conv2_kernel),
+            nn.ReLU(),
+            nn.AvgPool2d(pool2_kernel, stride=pool2_stride),
+
+            nn.Conv2d(conv2_out, conv3_out, kernel_size=conv3_kernel),
+            nn.ReLU(),
+        )
+        self.flatten = nn.Flatten()
+
+        # Calc output shape of convolutions
+        meta_tensor = torch.zeros(1, input_channels, input_size, input_size)
+        meta_tensor = self.convolutions(meta_tensor)
+        meta_tensor = self.flatten(meta_tensor)
+        out_features = meta_tensor.shape[1]
+
+        # Fully connected layers
+        self.classifier = nn.Sequential(
+            nn.Linear(out_features, fc_out_size),
+            nn.ReLU(),
+            nn.Linear(fc_out_size, num_classes)
+        )
+
+    def forward(self, x=None):
+        x = self.convolutions(x)
+        x = self.flatten(x)
+        x = self.classifier(x)
+        return x
+
+    def validate(self, val_loader, criterion, device):
+        self.eval()
+        val_loss = 0.0
+        correct = 0
+        total = 0
+        with torch.no_grad():
+            for inputs, labels in val_loader:
+                inputs, labels = inputs.to(device), labels.to(device)
+                outputs = self(inputs)
+                loss = criterion(outputs, labels)
+                val_loss += loss.item()
+                _, predicted = outputs.max(1)
+                total += labels.size(0)
+                correct += predicted.eq(labels).sum().item()
+        self.train()
+        return val_loss / len(val_loader), 100. * correct / total
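+
+# ConvNN exposes LeNet-5 training on CIFAR-10 as a waggon objective: candidate
+# points arrive in the unit hypercube and are rescaled to the bounds listed in
+# `domain_unscaled` before each model is built, trained, and scored.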
+
+class ConvNN(f.Function):
+    def __init__(self, n_obs=1, minimise=True, verbose=0, plot=False):
+        super(f.Function, self).__init__()
+
+        self.default_search_params = {
+            'conv1_out'     : 6,
+            'conv1_kernel'  : 5,
+            'pool1_kernel'  : 2,
+            'pool1_stride'  : 1,
+            'conv2_out'     : 16,
+            'conv2_kernel'  : 5,
+            'pool2_kernel'  : 2,
+            'pool2_stride'  : 2,
+            'conv3_out'     : 120,
+            'conv3_kernel'  : 5,
+            'fc_out_size'   : 84,
+            'learning_rate' : 3e-4,
+            'batch_size'    : 64,
+            'optimizer_Adam': 1, # ['Adam' -> 1, 'SGD' -> 0]
+        }
+        self.domain_unscaled = [
+            [   1,   32], # conv1_out
+            [   2,   12], # conv1_kernel
+            [   2,    5], # pool1_kernel
+            [   1,    3], # pool1_stride
+            [  32,   64], # conv2_out
+            [   2,    8], # conv2_kernel
+            [   2,    5], # pool2_kernel
+            [   1,    3], # pool2_stride
+            [  64,  128], # conv3_out
+            [   2,    6], # conv3_kernel
+            [ 256, 1024], # fc_out_size
+            [1e-6, 1e-3], # learning_rate
+            [  32,  512], # batch_size
+            # [ 0.0, 1.0], # optimizer_Adam
+        ]
+        self.dim = len(self.domain_unscaled)
+        self.domain = np.tile([0., 1.], reps=(self.dim,1))
+        self.plot = plot
+        self.name = 'classifier'
+        self.f = lambda x: self.__call__(x)
+        self.log_transform = False
+        self.log_eps = 1e-8
+        self.sigma = 1e-1
+        self.n_obs = n_obs
+        self.minimise = minimise
+        self.seed = 73
+        self.verbose = verbose
+
+        # load dataset
+        self.train_dataset, self.test_dataset = self.__load_cifar10(
+            'benchmarks/conv_nn/cifar10_train_data',
+            'benchmarks/conv_nn/cifar10_test_data'
+        )
+
+    def __call__(self, params: np.array):
+        NUM_EPOCHS = 10
+        DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+
+        results = []
+        search_space = np.copy(params)
+
+        # scaling params to actual values (in floats)
+        for i in range(search_space.shape[1]):
+            search_space[:, i] = qmc.scale(search_space[:, i].reshape(search_space.shape[0], 1), self.domain_unscaled[i][0], self.domain_unscaled[i][1]).squeeze()
+
+        for point in search_space:
+            # set configuration
+            config = {
+                'conv1_out'     : int(point[0]),
+                'conv1_kernel'  : int(point[1]),
+                'pool1_kernel'  : int(point[2]),
+                'pool1_stride'  : int(point[3]),
+                'conv2_out'     : int(point[4]),
+                'conv2_kernel'  : int(point[5]),
+                'pool2_kernel'  : int(point[6]),
+                'pool2_stride'  : int(point[7]),
+                'conv3_out'     : int(point[8]),
+                'conv3_kernel'  : int(point[9]),
+                'fc_out_size'   : int(point[10]),
+                'learning_rate' : point[11],
+                'batch_size'    : int(point[12]),
+                # 'optimizer_Adam': int(point[13] >= 0.5), # ['Adam' -> 1, 'SGD' -> 0]
+            }
+
+            if self.verbose > 0:
+                print('Current parameters for training:')
+                self.__print_dict(config)
+
+            train_loader = DataLoader(self.train_dataset, batch_size=config['batch_size'], shuffle=True)
+            test_loader = DataLoader(self.test_dataset, batch_size=config['batch_size'], shuffle=False)
+
+            model = LeNet5(
+                input_size=32,
+                input_channels=3,
+
+                conv1_out=config['conv1_out'],
+                conv1_kernel=config['conv1_kernel'],
+
+                pool1_kernel=config['pool1_kernel'],
+                pool1_stride=config['pool1_stride'],
+
+                conv2_out=config['conv2_out'],
+                conv2_kernel=config['conv2_kernel'],
+
+                pool2_kernel=config['pool2_kernel'],
+                pool2_stride=config['pool2_stride'],
+
+                conv3_out=config['conv3_out'],
+                conv3_kernel=config['conv3_kernel'],
+
+                fc_out_size=config['fc_out_size'],
+            ).to(DEVICE)
+
+            # ----- plot the model architecture -----
+            if self.plot:
+                timestamp = datetime.now().strftime("%d_%H_%M")
+                filename = f"cifar10_model_graph_{timestamp}.png"
+
+                model_graph = draw_graph(
+                    LeNet5(),
+                    input_size=(1, 3, 32, 32),
+                    expand_nested=True,
+                    save_graph=True,
+                    filename=filename
+                )
+
+            # ----- model training -----
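+            # NOTE: the commented-out block below is kept for a future
+            # `optimizer_Adam` search dimension; Adam is always used for now.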
+            # if config['optimizer_Adam'] == 1:
+            #     optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])
+            # else:
+            #     optimizer = optim.SGD(model.parameters(), lr=config['learning_rate'])
+
+            criterion = nn.CrossEntropyLoss()
+            optimizer = optim.Adam(model.parameters(), lr=config['learning_rate'])
+            best_metric = 1e-2
+
+            if self.verbose > 0:
+                train_loop = tqdm(range(NUM_EPOCHS), desc='Model training')
+            else:
+                train_loop = range(NUM_EPOCHS)
+
+            for epoch in train_loop:
+                model.train()
+                for images, labels in train_loader:
+                    images, labels = images.to(DEVICE), labels.to(DEVICE)
+
+                    optimizer.zero_grad()
+                    outputs = model(images)
+                    loss = criterion(outputs, labels)
+                    loss.backward()
+                    optimizer.step()
+
+                val_loss, accuracy = model.validate(test_loader, criterion, device=DEVICE)
+                if accuracy > best_metric:
+                    best_metric = accuracy
+
+                if self.verbose > 0:
+                    desc = f'Model training; Loss: {val_loss:.4f}, Accuracy: {accuracy:.2f}'
+                    train_loop.set_description(desc)
+            results.append(best_metric)
+        results = np.array(results).reshape(-1, 1)
+
+        if self.minimise:
+            return -1.0 * results
+        else:
+            return results
+
+    def sample(self, vectors_of_params):
+        return vectors_of_params, self.__call__(vectors_of_params)
+
+    def __print_dict(self, d):
+        print('\n'.join(f"{k:<{max(len(str(k)) for k in d)}} : {v}" for k, v in d.items()) if d else "empty")
+
+    def __load_cifar10(self, train_path, test_path):
+        transform = transforms.Compose([
+            transforms.ToTensor(),
+            transforms.Normalize(mean=[0.4914, 0.4822, 0.4465], std=[0.247, 0.243, 0.261])
+        ])
+
+        train_dataset = CIFAR10(
+            root=train_path,
+            train=True,
+            transform=transform,
+            download=True
+        )
+        test_dataset = CIFAR10(
+            root=test_path,
+            train=False,
+            transform=transform,
+            download=True
+        )
+
+        return train_dataset, test_dataset
+
\ No newline at end of file
diff --git a/benchmarks/conv_nn/test.py b/benchmarks/conv_nn/test.py
new file mode 100644
index 0000000..aef2daf
--- /dev/null
+++ b/benchmarks/conv_nn/test.py
@@ -0,0 +1,46 @@
+import os
+import pickle
+import numpy as np
+import conv_nn as conv
+
+from waggon.optim import SurrogateOptimiser
+from waggon.surrogates import GP
+from waggon.acquisitions import EI
+
+
+def init_samples(filename: str, optimizer: SurrogateOptimiser):
+    # load samples if they already exist
+    if os.path.exists(filename):
+        with open(filename, 'rb') as f:
+            samples = pickle.load(f)
+        return samples[:, :-1], samples[:, -1].reshape(-1, 1)
+
+    # otherwise create the samples
+    X = optimizer.create_candidates()
+    X, y = optimizer.func.sample(X)
+    samples = np.concatenate((X, y), axis=1)
+
+    # save samples
+    with open(filename, 'wb') as f:
+        pickle.dump(samples, f)
+
+    return X, y
+
+
+opt = SurrogateOptimiser(
+    func=conv.ConvNN(verbose=1),
+    surr=GP(),
+    acqf=EI(),
+    error_type='f',
+    num_opt_candidates=1,
+    n_candidates=25,
+    seed=2,
+    max_iter=3,
+    verbose=2
+)
+
+X, y = init_samples('benchmarks/conv_nn/init.pkl', opt)
+result = opt.optimise(X, y)
+
+print(opt.res)
+print(opt.params)
diff --git a/benchmarks/test.py b/benchmarks/test.py
deleted file mode 100644
index 019e451..0000000
--- a/benchmarks/test.py
+++ /dev/null
@@ -1,20 +0,0 @@
-import conv_nn as conv
-
-from waggon import functions as f
-from waggon.optim import SurrogateOptimiser
-from waggon.surrogates import GP, DGP
-from waggon.acquisitions import CB
-
-
-opt = SurrogateOptimiser(
-    func=conv.ConvNN(),
-    surr=GP(n_epochs=10),
-    acqf=CB(),
-    seed=2,
-    max_iter=10,
-    plot_results=True,
-    verbose=2
-)
-
-result = opt.optimise()
-print(result)
\ No newline at end of file
diff --git a/test_results/classifier/EI/GP/10_08_18_16_26.pkl b/test_results/classifier/EI/GP/10_08_18_16_26.pkl
new file mode 100644
index 0000000..3f1a3b8
Binary files /dev/null and b/test_results/classifier/EI/GP/10_08_18_16_26.pkl differ
diff --git a/test_results/classifier/EI/GP/10_08_18_21_42.pkl b/test_results/classifier/EI/GP/10_08_18_21_42.pkl
new file mode 100644
index 0000000..b6c5d2b
Binary files /dev/null and b/test_results/classifier/EI/GP/10_08_18_21_42.pkl differ
diff --git a/test_results/classifier/EI/GP/10_08_18_39_29.pkl b/test_results/classifier/EI/GP/10_08_18_39_29.pkl
new file mode 100644
index 0000000..3f1a3b8
Binary files /dev/null and b/test_results/classifier/EI/GP/10_08_18_39_29.pkl differ
diff --git a/test_results/classifier/LCB/GP/05_08_16_47_28.pkl b/test_results/classifier/LCB/GP/05_08_16_47_28.pkl
new file mode 100644
index 0000000..627eedf
Binary files /dev/null and b/test_results/classifier/LCB/GP/05_08_16_47_28.pkl differ
diff --git a/test_results/classifier/LCB/GP/06_08_18_48_30.pkl b/test_results/classifier/LCB/GP/06_08_18_48_30.pkl
new file mode 100644
index 0000000..13b79ac
Binary files /dev/null and b/test_results/classifier/LCB/GP/06_08_18_48_30.pkl differ
diff --git a/waggon/__pycache__/__init__.cpython-311.pyc b/waggon/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..db5a738
Binary files /dev/null and b/waggon/__pycache__/__init__.cpython-311.pyc differ
diff --git a/waggon/acquisitions/__pycache__/__init__.cpython-311.pyc b/waggon/acquisitions/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..a0e7e18
Binary files /dev/null and b/waggon/acquisitions/__pycache__/__init__.cpython-311.pyc differ
diff --git a/waggon/acquisitions/__pycache__/acquisition.cpython-311.pyc b/waggon/acquisitions/__pycache__/acquisition.cpython-311.pyc
new file mode 100644
index 0000000..33b1fbe
Binary files /dev/null and b/waggon/acquisitions/__pycache__/acquisition.cpython-311.pyc differ
diff --git a/waggon/acquisitions/__pycache__/base.cpython-311.pyc b/waggon/acquisitions/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000..9bc53cd
Binary files /dev/null and b/waggon/acquisitions/__pycache__/base.cpython-311.pyc differ
diff --git a/waggon/functions/__pycache__/__init__.cpython-311.pyc b/waggon/functions/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..69bd87e
Binary files /dev/null and b/waggon/functions/__pycache__/__init__.cpython-311.pyc differ
diff --git a/waggon/functions/__pycache__/base.cpython-311.pyc b/waggon/functions/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000..08c09a8
Binary files /dev/null and b/waggon/functions/__pycache__/base.cpython-311.pyc differ
diff --git a/waggon/functions/__pycache__/test_functions.cpython-311.pyc b/waggon/functions/__pycache__/test_functions.cpython-311.pyc
new file mode 100644
index 0000000..eb96770
Binary files /dev/null and b/waggon/functions/__pycache__/test_functions.cpython-311.pyc differ
diff --git a/waggon/functions/__pycache__/utils.cpython-311.pyc b/waggon/functions/__pycache__/utils.cpython-311.pyc
new file mode 100644
index 0000000..0fd9335
Binary files /dev/null and b/waggon/functions/__pycache__/utils.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/__init__.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..04f494a
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/__init__.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/ackley.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/ackley.cpython-311.pyc
new file mode 100644
index 0000000..c6d0eae
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/ackley.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/himmelblau.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/himmelblau.cpython-311.pyc
new file mode 100644
index 0000000..6e22c64
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/himmelblau.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/holder.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/holder.cpython-311.pyc
new file mode 100644
index 0000000..8b76380
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/holder.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/levi.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/levi.cpython-311.pyc
new file mode 100644
index 0000000..d47a534
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/levi.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/rosenbrock.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/rosenbrock.cpython-311.pyc
new file mode 100644
index 0000000..0773dcf
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/rosenbrock.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/sphere.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/sphere.cpython-311.pyc
new file mode 100644
index 0000000..473a342
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/sphere.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/submanifold.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/submanifold.cpython-311.pyc
new file mode 100644
index 0000000..04aa904
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/submanifold.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/tang.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/tang.cpython-311.pyc
new file mode 100644
index 0000000..6729b40
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/tang.cpython-311.pyc differ
diff --git a/waggon/functions/landscapes/__pycache__/thc.cpython-311.pyc b/waggon/functions/landscapes/__pycache__/thc.cpython-311.pyc
new file mode 100644
index 0000000..5ebd043
Binary files /dev/null and b/waggon/functions/landscapes/__pycache__/thc.cpython-311.pyc differ
diff --git a/waggon/optim/__pycache__/__init__.cpython-311.pyc b/waggon/optim/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..5995a98
Binary files /dev/null and b/waggon/optim/__pycache__/__init__.cpython-311.pyc differ
diff --git a/waggon/optim/__pycache__/barycentre.cpython-311.pyc b/waggon/optim/__pycache__/barycentre.cpython-311.pyc
new file mode 100644
index 0000000..19bef0a
Binary files /dev/null and b/waggon/optim/__pycache__/barycentre.cpython-311.pyc differ
diff --git a/waggon/optim/__pycache__/base.cpython-311.pyc b/waggon/optim/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000..226ce65
Binary files /dev/null and b/waggon/optim/__pycache__/base.cpython-311.pyc differ
diff --git a/waggon/optim/__pycache__/surrogate.cpython-311.pyc b/waggon/optim/__pycache__/surrogate.cpython-311.pyc
new file mode 100644
index 0000000..a92e2d9
Binary files /dev/null and b/waggon/optim/__pycache__/surrogate.cpython-311.pyc differ
diff --git a/waggon/optim/__pycache__/utils.cpython-311.pyc b/waggon/optim/__pycache__/utils.cpython-311.pyc
new file mode 100644
index 0000000..8961291
Binary files /dev/null and b/waggon/optim/__pycache__/utils.cpython-311.pyc differ
diff --git a/waggon/optim/base.py b/waggon/optim/base.py
index 35dd9ec..a5b94dc 100644
--- a/waggon/optim/base.py
+++ b/waggon/optim/base.py
@@ -77,9 +77,9 @@ def __init__(self, **kwargs):
             else:
                 raise ValueError
         elif self.error_type == 'f':
-            if self.func.f_min is not None:
+            if hasattr(self.func, 'f_min') and self.func.f_min is not None:
                 self.error = lambda y: np.min(np.linalg.norm(self.func.f_min - transform(y), ord=2, axis=-1), axis=-1)
-            elif self.func.glob_min is not None:
+            elif hasattr(self.func, 'glob_min') and self.func.glob_min is not None:
                 self.func_f_min = self.func(self.func.glob_min)
                 self.error = lambda y: np.min(np.linalg.norm(transform(self.func_f_min) - transform(y), ord=2, axis=-1), axis=-1)
             else:
@@ -138,10 +138,10 @@ def optimise(self, X=None, y=None):
         if X is None:
             X = self.create_candidates(strength=1 if not self.olhs else 2)
             X, y = self.func.sample(X)
-            self.res = np.array([[np.min(self.func(X))]])
-            self.params = np.array([X[np.argmin(self.func(X)), :]])
+            self.res = np.array([[np.min(y)]])
+            self.params = np.array([X[np.argmin(y), :]])
         else:
-            self.res = np.array([np.min(y)])
+            self.res = np.array([[np.min(y)]])
             self.params = np.array([X[np.argmin(y), :]])
 
         if self.verbose == 0:
diff --git a/waggon/optim/baselines/__pycache__/__init__.cpython-311.pyc b/waggon/optim/baselines/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..5fa22b7
Binary files /dev/null and b/waggon/optim/baselines/__pycache__/__init__.cpython-311.pyc differ
diff --git a/waggon/optim/baselines/__pycache__/evolutions.cpython-311.pyc b/waggon/optim/baselines/__pycache__/evolutions.cpython-311.pyc
new file mode 100644
index 0000000..37835ea
Binary files /dev/null and b/waggon/optim/baselines/__pycache__/evolutions.cpython-311.pyc differ
diff --git a/waggon/optim/baselines/__pycache__/protes.cpython-311.pyc b/waggon/optim/baselines/__pycache__/protes.cpython-311.pyc
new file mode 100644
index 0000000..caa3d9f
Binary files /dev/null and b/waggon/optim/baselines/__pycache__/protes.cpython-311.pyc differ
diff --git a/waggon/surrogates/__pycache__/__init__.cpython-311.pyc b/waggon/surrogates/__pycache__/__init__.cpython-311.pyc
new file mode 100644
index 0000000..9702ff8
Binary files /dev/null and b/waggon/surrogates/__pycache__/__init__.cpython-311.pyc differ
diff --git a/waggon/surrogates/__pycache__/base.cpython-311.pyc b/waggon/surrogates/__pycache__/base.cpython-311.pyc
new file mode 100644
index 0000000..bdfcf50
Binary files /dev/null and b/waggon/surrogates/__pycache__/base.cpython-311.pyc differ
diff --git a/waggon/surrogates/__pycache__/bnn.cpython-311.pyc b/waggon/surrogates/__pycache__/bnn.cpython-311.pyc
new file mode 100644
index 0000000..2bf5967
Binary files /dev/null and b/waggon/surrogates/__pycache__/bnn.cpython-311.pyc differ
diff --git a/waggon/surrogates/__pycache__/de.cpython-311.pyc b/waggon/surrogates/__pycache__/de.cpython-311.pyc
new file mode 100644
index 0000000..ec607a0
Binary files /dev/null and b/waggon/surrogates/__pycache__/de.cpython-311.pyc differ
diff --git a/waggon/surrogates/__pycache__/dgp.cpython-311.pyc b/waggon/surrogates/__pycache__/dgp.cpython-311.pyc
new file mode 100644
index 0000000..9bb7faa
Binary files /dev/null and b/waggon/surrogates/__pycache__/dgp.cpython-311.pyc differ
diff --git a/waggon/surrogates/__pycache__/gan.cpython-311.pyc b/waggon/surrogates/__pycache__/gan.cpython-311.pyc
new file mode 100644
index 0000000..845e9b3
Binary files /dev/null and b/waggon/surrogates/__pycache__/gan.cpython-311.pyc differ
diff --git a/waggon/surrogates/__pycache__/gp.cpython-311.pyc b/waggon/surrogates/__pycache__/gp.cpython-311.pyc
new file mode 100644
index 0000000..e70f21c
Binary files /dev/null and b/waggon/surrogates/__pycache__/gp.cpython-311.pyc differ