# DataAnalysis/GPR_Optimizer_test.py
from GPR_Optimizer import GPR_Optimizer
from GPR_Optimizer import multivariate_gaussian_signal, calculate_distances
import numpy as np
import matplotlib.pyplot as plt
#from scipy.stats import multivariate_normal
#from skopt.plots import plot_evaluations, plot_convergence, plot_objective, plot_gaussian_process, plot_histogram, plot_objective_2D, plot_regret
#from skopt.plots import expected_minimum, expected_minimum_random_sampling
import warnings
from joblib import Parallel, delayed
import time
import threading
def check_user_input(input_thread):
    """Return True once the input thread has finished after receiving an empty line.

    Assumes the thread stores its last line on `input_thread.input`.
    """
    input_thread.join(timeout=0.1)
    if input_thread.is_alive():
        return False
    return getattr(input_thread, 'input', None) == ''
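# Hypothetical usage (a sketch only; InputThread below does not currently record
# its input, so this helper is unused in this script):
#   if check_user_input(input_thread):
#       break  # user submitted an empty line; stop the loop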
# Define the cost function
ndims = 2
norm_y = 1  # normalization for y: y = y/norm_y  ############## CHANGE ###############
def objective(x):
    offset = 0
    val = multivariate_gaussian_signal(x, ndims=ndims, amplitude=100, sigma=0.2, noisetype='gaussian', background_noise=10, N_gaussian=10) + offset
    #err = np.sqrt(np.abs(val)) + 1  # std
    err = 1e0  # constant value
    return val/norm_y, err/norm_y
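# Quick sanity check of the objective (a sketch; multivariate_gaussian_signal comes
# from GPR_Optimizer and, per the x_true hint in main() below, is assumed to peak at
# the origin with amplitude ~100 over a Gaussian background of ~10):
#   val, err = objective([0.0] * ndims)   # val ~ 100/norm_y plus noise, err = 1.0
#   val, err = objective([1.0] * ndims)   # far from the peak: val ~ background level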
def main():
    config_path = 'C:/Users/13321/OneDrive/文档/DataAnalysis/optimizer_config.json'
    opt = GPR_Optimizer(
        dimensions = [(-1., 1.)] * ndims,
        length_scale = [0.2] * ndims,  # in units of the parameters
        length_scale_bounds = [(1e-2, 1e2)] * ndims,  # can be the same for all dimensions
        noise_level = 0.1,  # float or None. White-noise kernel hyperparameter; if normalize_y=True, this is noise/std(y)
        noise_level_bounds = [(1e-3, 1e-1)],
        n_initial_points = 60,  # random points drawn with Latin hypercube sampling
        acq_func = "LCB",  # "EI", "LCB", "PI", or "gp_hedge" (all three). LCB explores the most; stay with it.
        kappa = 4*1.96,  # used by LCB (1.96 ~ 95% confidence). Larger for more exploration, smaller for local optimization.
        xi = 5*0.01,  # desired improvement, used by EI and PI. Not used by LCB.
        normalize_y = True,  # subtracts the mean and scales y to unit std right before fitting, without rescaling alpha or the hyperparameters
        x_initial = [ [1]*ndims ],  # initial x points to run
        n_restarts_optimizer = 0,  # how many restarts of the hyperparameter search. Samples 10,000 values in hyperparameter space; if acq_optimizer='lbfgs', runs gradient minimization from the n_restarts_optimizer + 1 best points
        sigma_value = 0.5,  # sigma^2 in the kernel sigma^2 * exp(-|x - x'|^2 / (2 l^2)); initial ConstantKernel value. It tracks the spread of y, so with normalize_y=True here it should be between 1e-2 and 1.
        sigma_value_bounds = (1e-1, 1),  # ConstantKernel bounds
        acq_optimizer = 'sampling',  # 'lbfgs' or 'sampling' (faster). Samples 10,000 points of the acquisition function; 'lbfgs' then runs 20 L-BFGS iterations on the best points, 'sampling' just picks the best. Can take a long time for many parameters; currently a warning is printed if tell() takes > 1 s.
        verbose = False,
        alpha = []  # leave empty: alpha is not rescaled when normalize_y=True. Pass errors via tell(x, y, y_err) instead.
    )
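    # For reference (hedged: assumes GPR_Optimizer wraps skopt/sklearn, as the
    # parameter names suggest), the fitted kernel is
    #     k(x, x') = sigma_value * exp(-|x - x'|^2 / (2 * length_scale^2)) + noise_level * delta(x, x')
    # and, for minimization, LCB proposes the next point by minimizing
    #     LCB(x) = mu(x) - kappa * sigma(x),
    # so kappa = 4*1.96 weights the model uncertainty heavily, i.e. strong exploration.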
    def update_optimizer(config_path):
        nonlocal opt  # nonlocal so we can rebind opt in the enclosing scope
        opt = opt.load_optimizer_from_file(config_path)
        #opt.std_for_y_norm = 1
        #opt.mean_for_y_norm = 0
        #opt.models[-1].y_train_mean_
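    # Hot-reload pattern: while InputThread holds the loop paused, the JSON config
    # can be edited by hand; resuming triggers update_optimizer(), which replaces
    # opt with the state re-loaded from disk before the next ask/tell step.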
    #x_true = [0]*ndims  # used for convergence checks
    # Run optimizer
    opt.write_to_file(config_path=config_path)
    input_thread = InputThread(optimizer=opt, config_path=config_path, update_callback=update_optimizer)
    input_thread.start()
    for i in range(100):
        print(i)
        while input_thread.paused:
            time.sleep(0.1)  # short sleep to avoid pegging the CPU while paused
        next_x = opt.ask()
        f_val, err = objective(next_x)
        opt.tell(next_x, f_val, error=err, fit=True)
        opt.write_to_file(config_path=config_path)  # update the config file
        time.sleep(0.5)
    # Plot results
    res = opt.get_result()
    print("best point:", res.x, res.fun)
    xvalues = res.x_iters
    yvalues = res.func_vals
    # # Plots
    # plt.figure()
    # opt.plot_objective()  # plots the GPR model (add error); minimum='expected_minimum' or 'result'
    # plt.figure()
    # opt.plot_convergence()  # plots the running best at each iteration; see the sketch below for plotting every point
    # plt.figure()
    # opt.plot_hyperparameters()  # if normalize_y=True, this is in the normalized units
    # plt.figure()
    # opt.plot_evaluations()
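    # Sketch: plot every evaluated objective value rather than the running best,
    # using the iterates pulled from res above:
    # plt.figure()
    # plt.plot(yvalues, '.')
    # plt.xlabel('iteration'); plt.ylabel('objective')
    # plt.show()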
class InputThread(threading.Thread):
    """Daemon thread that toggles a pause flag on each Enter press and reloads the optimizer on resume."""
    def __init__(self, optimizer, config_path, update_callback):
        super().__init__()
        self.opt = optimizer
        self.config_path = config_path
        self.update_callback = update_callback
        self.paused = False
        self.daemon = True  # don't block interpreter exit
    def run(self):
        while True:
            if not self.paused:
                input("press enter to pause...\n")
                self.paused = True
            else:
                input("paused, press enter to resume...\n")
                self.update_callback(self.config_path)  # reload optimizer from the (possibly edited) config
                self.paused = False
if __name__ == "__main__":
    main()