Permalink
Cannot retrieve contributors at this time
Name already in use
A tag already exists with the provided branch name. Many Git commands accept both tag and branch names, so creating this branch may cause unexpected behavior. Are you sure you want to create this branch?
code_sharing/gpr_manager.py
Go to fileThis commit does not belong to any branch on this repository, and may belong to a fork outside of the repository.
173 lines (133 sloc)
7.14 KB
This file contains bidirectional Unicode text that may be interpreted or compiled differently than what appears below. To review, open the file in an editor that reveals hidden Unicode characters.
Learn more about bidirectional Unicode characters
from artiq.experiment import *

import os.path
import pickle
import sys
import threading
import time

import matplotlib.pyplot as plt
import mss  # for screen shots
import numpy as np
import scipy.optimize as opt
import telegram_send  # for sending phone notifications

# Local analysis-code checkout (not an installed package).
sys.path.append(r"C:\Users\hoodl\Documents\GitHub\DataAnalysis")
from GPR_Optimizer import GPR_Optimizer
from LiCs_MOT_Main import LiCs_MOT_Main
#######################################################################################
''' You must give cost and cost_err. cost_err must be non-zero. If noise_level is not None, then the
GaussianKernel will handle noise. cost_err determines the diagonals of the covariance matrix and must be non-zero for fitting.
But they also estimate the noise and make the Gaussian kernel do less work, so put a conservatively low guess for the noise there (in the units of the cost).
Give noise_level in terms of the expected noise std divided by the y standard deviation, because normalize_y = True.
'''
####################################################################################### | |
class gpr_manager(LiCs_MOT_Main, EnvExperiment):
    """ARTIQ experiment wrapping LiCs_MOT_Main in a GPR-based optimization loop.

    Each iteration asks a GPR_Optimizer for a parameter vector, evaluates the
    experiment's cost via ``super().run()``, and feeds the measured cost and
    its error bar back to the optimizer.  Optimizer state is checkpointed to a
    JSON config after every accepted point and pickled to disk each iteration.
    """

    def build(self):
        # Number of shots averaged per cost evaluation (shown in the ARTIQ GUI).
        self.setattr_argument("Averaging", NumberValue(default=1, ndecimals=0, step=1))
        super().build()

    def prepare(self):
        # Optimization variables; the superclass maps v1..v12 onto hardware knobs.
        self.v1 = self.v2 = self.v3 = self.v4 = self.v5 = self.v6 = \
            self.v7 = self.v8 = self.v9 = self.v10 = self.v11 = self.v12 = 0.
        # Maximum number of optimizer iterations.
        self.number_points = 3000
        super().prepare(manager_truth=True, plot_index=True)

    def run(self):
        def update_optimizer(config_path):
            # Reload optimizer state from the config file (used when resuming
            # after a pause, see InputThread.update_callback).
            nonlocal opt
            opt = opt.load_optimizer_from_file(config_path)

        # NOTE(review): change this path if the environment changes.
        config_path = r'C:\Users\hoodl\Documents\GitHub\DataAnalysis\optimizer_config.json'
        # Search box for each optimization variable -- make sure these are floats!
        dimensions = [(-2., 2.), (-3., 3.), (-3., 3.), (-0.5, 0.5),
                      (-2., 2.), (-3., 3.), (-3., 3.), (-0.5, 0.5)]
        opt = GPR_Optimizer(
            dimensions=dimensions,
            # NOTE(review): 0.3 lies outside length_scale_bounds (0.05, 0.2) --
            # confirm this is intended before the hyperparameter fit clips it.
            length_scale=[0.3] * len(dimensions),
            length_scale_bounds=[(0.05, 0.2)] * len(dimensions),  # can be same
            noise_level=0.1,                # float or None; normalized by the y std
            noise_level_bounds=[(1e-2, 1e-1)],  # normalized by std
            n_initial_points=100,
            acq_func="LCB",                 # "EI", "LCB", "PI", "gp_hedge" (all three)
            kappa=2 * 1.96,                 # LCB/PI exploration weight; 1.96 for one std.
                                            # Larger -> more exploration, smaller -> local.
            normalize_y=True,               # subtract mean / normalize std of y before fit
            x_initial=[[0.] * len(dimensions)],  # initial x points to run
            n_restarts_optimizer=2,         # lbfgs restarts for the hyperparameter search
            sigma_value=1,                  # sigma^2 in sigma^2*exp(-|x-x'|^2/2l^2);
                                            # make similar to the spread of y
            sigma_value_bounds=(1e-2, 1),   # ConstantKernel bounds
            acq_optimizer='lbfgs',          # or 'sampling' to speed up
            verbose=True,
            config_path=config_path,
        )
        opt.write_to_file(config_path=config_path)

        for iteration in range(self.number_points):
            print('Iteration number:', iteration)
            v_list = opt.ask()  # next parameter vector to evaluate
            print(v_list)
            try:
                cost, cost_err = super().run(v_list=v_list)
                if cost > 0.:
                    print("COST IS POSITIVE! MAKE SURE IT IS NEGATIVE!")
                opt.tell(v_list, cost, cost_err)
                opt.write_to_file(config_path=config_path)  # checkpoint config
            except Exception as e:
                # A failed point (hardware glitch, fit failure, ...) is skipped
                # without being told to the optimizer; keep optimizing.
                print('Bad Point!!!', e)

            # Pickle the full optimizer state every iteration.
            file_path = "C:/Users/hoodl/Desktop/save_opt_folder/file_" + self.buildtime
            with open(file_path, 'wb') as file:
                pickle.dump(opt, file)

            # Disabled diagnostics dump; flip `if 0` to re-enable saving histograms.
            if 0:
                try:
                    if self.index % 1 == 0 or self.index == self.number_points:
                        hist_counts0roiX = []
                        hist_counts1roiX = []
                        for i in range(self.number_of_rois):
                            name = "hist_counts0roi" + str(i)
                            hist_counts0roiX.append(self.get_dataset(name, archive=False))
                            name = "hist_counts1roi" + str(i)
                            # BUGFIX: this dataset was appended to hist_counts0roiX,
                            # leaving hist_counts1roiX empty in the saved npz.
                            hist_counts1roiX.append(self.get_dataset(name, archive=False))
                        np.savez(
                            "S:/flir_images/binaries/" + self.buildtime,
                            index=self.npindex,
                            variable=self.npvariable,
                            variable2=self.npvariable2,
                            variable3=self.npvariable3,
                            cost=self.npcost,
                            averaging=self.Averaging,
                            hist_bins=self.get_dataset("hist_bins", archive=False),
                            hist_counts0roiX=hist_counts0roiX,
                            hist_counts1roiX=hist_counts1roiX,
                        )
                        print("Saving npz: " + str(self.buildtime))
                except Exception as e:
                    # Best-effort save: log instead of silently swallowing.
                    print('Failed to Save!!!', e)
class InputThread(threading.Thread):
    """Daemon thread that lets the user pause/resume the optimization loop.

    Pressing Enter toggles ``paused``; on resume, ``update_callback`` is called
    with ``config_path`` so the caller can reload optimizer state from disk.
    """

    def __init__(self, optimizer, config_path, update_callback):
        super().__init__()
        self.opt = optimizer
        self.config_path = config_path
        self.update_callback = update_callback
        self.paused = False
        self.daemon = True  # don't keep the interpreter alive on exit

    def run(self):
        # BUGFIX: the original looped on the *global* name `opt` (which at
        # module scope is scipy.optimize), not this thread's optimizer.
        while self.opt.switch:
            if not self.paused:
                input("press enter to pause...\n")
                self.paused = True
            else:
                input("paused, press enter to resume...\n")
                self.update_callback(self.config_path)
                self.paused = False
# def check_user_input(input_thread): | |
# input_thread.join(timeout=0.1) | |
# if input_thread.is_alive(): | |
# return False | |
# else: | |
# return input_thread.input == '' |