diff --git a/cbc_scripts.py b/cbc_scripts.py new file mode 100644 index 0000000000000000000000000000000000000000..cca9e6e94b780d4ae7ff73e2863d8e451c57035e --- /dev/null +++ b/cbc_scripts.py @@ -0,0 +1,739 @@ +#! /usr/bin/env python + +""" Script to generate training and testing samples +""" + +from __future__ import division, print_function + +import numpy as np +import bilby +from sys import exit +import os, glob, shutil +import matplotlib +matplotlib.use('agg') +import matplotlib.pyplot as plt +from scipy import integrate, interpolate +import scipy +import lalsimulation +import lal +import time +import h5py +from scipy.ndimage.interpolation import shift +import argparse +from gwpy.timeseries import TimeSeries + +def parser(): + """ Parses command line arguments + + Returns + ------- + arguments + """ + + #TODO: complete help sections + parser = argparse.ArgumentParser(prog='bilby_pe.py', description='script for generating bilby samples/posterior') + + # arguments for data + parser.add_argument('-samplingfrequency', type=float, help='sampling frequency of signal') + parser.add_argument('-samplers', nargs='+', type=str, help='list of samplers to use to generate') + parser.add_argument('-duration', type=float, help='duration of signal in seconds') + parser.add_argument('-Ngen', type=int, help='number of samples to generate') + parser.add_argument('-refgeocenttime', type=float, help='reference geocenter time') + parser.add_argument('-bounds', type=str, help='dictionary of the bounds') + parser.add_argument('-fixedvals', type=str, help='dictionary of the fixed values') + parser.add_argument('-randpars', nargs='+', type=str, help='list of pars to randomize') + parser.add_argument('-infpars', nargs='+', type=str, help='list of pars to infer') + parser.add_argument('-label', type=str, help='label of run') + parser.add_argument('-outdir', type=str, help='output directory') + parser.add_argument('-training', type=str, help='boolean for train/test config') + parser.add_argument('-seed', type=int, help='random seed') + parser.add_argument('-dope', type=str, help='boolean for whether or not to do PE') + + + return parser.parse_args() + +def gen_real_noise(duration, + sampling_frequency, + det, + ref_geocent_time, psd_files=[], + real_noise_seg =[None,None] + ): + """ pull real noise samples + """ + + # compute the number of time domain samples + Nt = int(sampling_frequency*duration) + + # Get ifos bilby variable + ifos = bilby.gw.detector.InterferometerList(det) + + start_open_seg, end_open_seg = real_noise_seg # 1 sec noise segments + for ifo_idx,ifo in enumerate(ifos): # iterate over interferometers + time_series = TimeSeries.find('%s:GDS-CALIB_STRAIN' % det[ifo_idx], + start_open_seg, end_open_seg) # pull timeseries data using gwpy + ifo.set_strain_data_from_gwpy_timeseries(time_series=time_series) # input new ts into bilby ifo + + noise_sample = ifos[0].strain_data.frequency_domain_strain # get frequency domain strain + noise_sample /= ifos[0].amplitude_spectral_density_array # assume default psd from bilby + noise_sample = np.sqrt(2.0*Nt)*np.fft.irfft(noise_sample) # convert frequency to time domain + + return noise_sample + + +def gen_template(duration, + sampling_frequency, + pars, + ref_geocent_time, psd_files=[], + use_real_det_noise=False, + real_noise_seg =[None,None] + ): + """ Generates a whitened waveforms in Gaussian noise. 
+ + Parameters + ---------- + duration: float + duration of the signal in seconds + sampling_frequency: float + sampling frequency of the signal + pars: dict + values of source parameters for the waveform + ref_geocent_time: float + reference geocenter time of injected signal + psd_files: list + list of psd files to use for each detector (if other than default is wanted) + use_real_det_noise: bool + if True, use real ligo noise around ref_geocent_time + real_noise_seg: list + list containing the starting and ending times of the real noise + segment + + Returns + ------- + whitened noise-free signal: array_like + whitened noisy signal: array_like + injection_parameters: dict + source parameter values of injected signal + ifos: dict + interferometer properties + waveform_generator: bilby function + function used by bilby to inject signal into noise + """ + + if sampling_frequency>4096: + print('EXITING: bilby doesn\'t seem to generate noise above 2048Hz so lower the sampling frequency') + exit(0) + + # compute the number of time domain samples + Nt = int(sampling_frequency*duration) + + # define the start time of the timeseries + start_time = ref_geocent_time-duration/2.0 + + # fix parameters here + injection_parameters = dict( + mass_1=pars['mass_1'],mass_2=pars['mass_2'], a_1=pars['a_1'], a_2=pars['a_2'], tilt_1=pars['tilt_1'], tilt_2=pars['tilt_2'], + phi_12=pars['phi_12'], phi_jl=pars['phi_jl'], luminosity_distance=pars['luminosity_distance'], theta_jn=pars['theta_jn'], psi=pars['psi'], + phase=pars['phase'], geocent_time=pars['geocent_time'], ra=pars['ra'], dec=pars['dec']) + + # Fixed arguments passed into the source model + waveform_arguments = dict(waveform_approximant='IMRPhenomPv2', + reference_frequency=20., minimum_frequency=20.) + + # Create the waveform_generator using a LAL BinaryBlackHole source function + waveform_generator = bilby.gw.WaveformGenerator( + duration=duration, sampling_frequency=sampling_frequency, + frequency_domain_source_model=bilby.gw.source.lal_binary_black_hole, + parameter_conversion=bilby.gw.conversion.convert_to_lal_binary_black_hole_parameters, + waveform_arguments=waveform_arguments, + start_time=start_time) + + # create waveform + wfg = waveform_generator + + # extract waveform from bilby + wfg.parameters = injection_parameters + freq_signal = wfg.frequency_domain_strain() + time_signal = wfg.time_domain_strain() + + # Set up interferometers. These default to their design + # sensitivity + + ifos = bilby.gw.detector.InterferometerList(pars['det']) + + # If user is specifying PSD files + if len(psd_files) > 0: + type_psd = psd_files[0].split('/')[-1].split('_')[-1].split('.')[0] + for int_idx,ifo in enumerate(ifos): + if type_psd == 'psd': + ifo.power_spectral_density = bilby.gw.detector.PowerSpectralDensity(psd_file=psd_files[0]) + elif type_psd == 'asd': + ifo.power_spectral_density = bilby.gw.detector.PowerSpectralDensity(asd_file=psd_files[0]) + else: + print('Could not determine whether psd or asd ...') + exit() + + ifos.set_strain_data_from_power_spectral_densities( + sampling_frequency=sampling_frequency, duration=duration, + start_time=start_time) + + # inject signal + ifos.inject_signal(waveform_generator=waveform_generator, + parameters=injection_parameters) + + print('... 
Injected signal') + whitened_signal_td_all = [] + whitened_h_td_all = [] + # iterate over ifos + for i in range(len(pars['det'])): + # get frequency domain noise-free signal at detector + signal_fd = ifos[i].get_detector_response(freq_signal, injection_parameters) + + # whiten frequency domain noise-free signal (and reshape/flatten) + whitened_signal_fd = signal_fd/ifos[i].amplitude_spectral_density_array + #whitened_signal_fd = whitened_signal_fd.reshape(whitened_signal_fd.shape[0]) + + # get frequency domain signal + noise at detector + h_fd = ifos[i].strain_data.frequency_domain_strain + + # inverse FFT noise-free signal back to time domain and normalise + whitened_signal_td = np.sqrt(2.0*Nt)*np.fft.irfft(whitened_signal_fd) + + # whiten noisy frequency domain signal + whitened_h_fd = h_fd/ifos[i].amplitude_spectral_density_array + + # inverse FFT noisy signal back to time domain and normalise + whitened_h_td = np.sqrt(2.0*Nt)*np.fft.irfft(whitened_h_fd) + + whitened_h_td_all.append([whitened_h_td]) + whitened_signal_td_all.append([whitened_signal_td]) + + print('... Whitened signals') + return np.squeeze(np.array(whitened_signal_td_all),axis=1),np.squeeze(np.array(whitened_h_td_all),axis=1),injection_parameters,ifos,waveform_generator + +def run(sampling_frequency=256.0, + duration=1., + N_gen=1000, + bounds=None, + fixed_vals=None, + rand_pars=[None], + inf_pars=[None], + ref_geocent_time=1126259642.5, + training=True, + do_pe=False, + label='test_results', + out_dir='bilby_output', + seed=None, + samplers=['vitamin','dynesty'], + condor_run=False, + params=None, + det=['H1','L1','V1'], + psd_files=[], + use_real_det_noise=False, + use_real_events=False, + samp_idx=False, + ): + """ Main function to generate both training sample time series + and test sample time series/posteriors. + + Parameters + ---------- + sampling_frequency: float + sampling frequency of the signals + duration: float + duration of signals in seconds + N_gen: int + number of test/training timeseries to generate + bounds: dict + allowed bounds of timeseries source parameters + fixed_vals: dict + fixed values of source parameters not randomized + rand_pars: list + source parameters to randomize + inf_pars: list + source parameters to infer + ref_geocent_time: float + reference geocenter time of injected signals + training: bool + if true, generate training timeseries + do_pe: bool + if true, generate posteriors in addtion to test sample time series + label: string + label to give to saved files + out_dir: string + output directory of saved files + seed: float + random seed for generating timeseries and posterior samples + samplers: list + samplers to use when generating posterior samples + condor_run: bool + if true, use setting to make condor jobs run properly + params: dict + general script run parameters + det: list + detectors to use + psd_files + optional list of psd files to use for each detector + """ + + # Set up a random seed for result reproducibility. This is optional! + if seed is not None: + np.random.seed(seed) + + # Set up a PriorDict, which inherits from dict. 
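+    # Each source parameter below follows the same pattern: if its name appears in
+    # rand_pars it is given a prior over the range supplied in `bounds` (mostly Uniform;
+    # tilt_1 and tilt_2 use Sine), otherwise it is pinned to the value in `fixed_vals`.
+    # Parameters whose randomised branch is a bare `pass` (dec, theta_jn) simply keep
+    # the default BBHPriorDict priors.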
+ priors = bilby.gw.prior.BBHPriorDict() + priors.pop('chirp_mass') + priors['mass_ratio'] = bilby.gw.prior.Constraint(minimum=0.125, maximum=1, name='mass_ratio', latex_label='$q$', unit=None) + if np.any([r=='geocent_time' for r in rand_pars]): + priors['geocent_time'] = bilby.core.prior.Uniform( + minimum=ref_geocent_time + bounds['geocent_time_min'], + maximum=ref_geocent_time + bounds['geocent_time_max'], + name='geocent_time', latex_label='$t_c$', unit='$s$') + else: + priors['geocent_time'] = fixed_vals['geocent_time'] + + if np.any([r=='mass_1' for r in rand_pars]): + print('inside m1') + priors['mass_1'] = bilby.gw.prior.Uniform(name='mass_1', minimum=bounds['mass_1_min'], maximum=bounds['mass_1_max'],unit='$M_{\odot}$') + else: + priors['mass_1'] = fixed_vals['mass_1'] + + if np.any([r=='mass_2' for r in rand_pars]): + priors['mass_2'] = bilby.gw.prior.Uniform(name='mass_2', minimum=bounds['mass_2_min'], maximum=bounds['mass_2_max'],unit='$M_{\odot}$') + else: + priors['mass_2'] = fixed_vals['mass_2'] + + if np.any([r=='a_1' for r in rand_pars]): + priors['a_1'] = bilby.gw.prior.Uniform(name='a_1', minimum=bounds['a_1_min'], maximum=bounds['a_1_max']) + else: + priors['a_1'] = fixed_vals['a_1'] + + if np.any([r=='a_2' for r in rand_pars]): + priors['a_2'] = bilby.gw.prior.Uniform(name='a_2', minimum=bounds['a_2_min'], maximum=bounds['a_2_max']) + else: + priors['a_2'] = fixed_vals['a_2'] + + if np.any([r=='tilt_1' for r in rand_pars]): + #priors['tilt_1'] = bilby.gw.prior.Uniform(name='tilt_1', minimum=bounds['tilt_1_min'], maximum=bounds['tilt_1_max']) + priors['tilt_1'] = bilby.core.prior.Sine(name='tilt_1', minimum=bounds['tilt_1_min'], maximum=bounds['tilt_1_max']) + else: + priors['tilt_1'] = fixed_vals['tilt_1'] + + if np.any([r=='tilt_2' for r in rand_pars]): + #priors['tilt_2'] = bilby.gw.prior.Uniform(name='tilt_2', minimum=bounds['tilt_2_min'], maximum=bounds['tilt_2_max']) + priors['tilt_2'] = bilby.core.prior.Sine(name='tilt_2', minimum=bounds['tilt_2_min'], maximum=bounds['tilt_2_max']) + else: + priors['tilt_2'] = fixed_vals['tilt_2'] + + if np.any([r=='phi_12' for r in rand_pars]): + priors['phi_12'] = bilby.gw.prior.Uniform(name='phi_12', minimum=bounds['phi_12_min'], maximum=bounds['phi_12_max'], boundary='periodic') + else: + priors['phi_12'] = fixed_vals['phi_12'] + + if np.any([r=='phi_jl' for r in rand_pars]): + priors['phi_jl'] = bilby.gw.prior.Uniform(name='phi_jl', minimum=bounds['phi_jl_min'], maximum=bounds['phi_jl_max'], boundary='periodic') + else: + priors['phi_jl'] = fixed_vals['phi_jl'] + + if np.any([r=='ra' for r in rand_pars]): + priors['ra'] = bilby.gw.prior.Uniform(name='ra', minimum=bounds['ra_min'], maximum=bounds['ra_max'], boundary='periodic') + else: + priors['ra'] = fixed_vals['ra'] + + if np.any([r=='dec' for r in rand_pars]): + pass + else: + priors['dec'] = fixed_vals['dec'] + + if np.any([r=='psi' for r in rand_pars]): + priors['psi'] = bilby.gw.prior.Uniform(name='psi', minimum=bounds['psi_min'], maximum=bounds['psi_max'], boundary='periodic') + else: + priors['psi'] = fixed_vals['psi'] + + if np.any([r=='theta_jn' for r in rand_pars]): + pass + else: + priors['theta_jn'] = fixed_vals['theta_jn'] + + if np.any([r=='phase' for r in rand_pars]): + priors['phase'] = bilby.gw.prior.Uniform(name='phase', minimum=bounds['phase_min'], maximum=bounds['phase_max'], boundary='periodic') + else: + priors['phase'] = fixed_vals['phase'] + + if np.any([r=='luminosity_distance' for r in rand_pars]): + priors['luminosity_distance'] = 
bilby.gw.prior.Uniform(name='luminosity_distance', minimum=bounds['luminosity_distance_min'], maximum=bounds['luminosity_distance_max'], unit='Mpc') + else: + priors['luminosity_distance'] = fixed_vals['luminosity_distance'] + + # generate training samples + if training == True: + train_samples = real_noise_array = [] + snrs = [] + train_pars = np.zeros((N_gen,len(rand_pars))) + for i in range(N_gen): + + # sample from priors + pars = priors.sample() + pars['det'] = det + + # store the params + temp = [] + for p_idx,p in enumerate(rand_pars): + for q,qi in pars.items(): + if p==q: + #temp.append(qi-ref_geocent_time) if p=='geocent_time' else temp.append(qi) + if p == 'geocent_time': + train_pars[i,p_idx] = qi-ref_geocent_time + else: + train_pars[i,p_idx] = qi + #train_pars.append([temp]) + + # make the data - shift geocent time to correct reference + train_samp_noisefree, train_samp_noisy,_,ifos,_ = gen_template(duration,sampling_frequency, + pars,ref_geocent_time,psd_files, + use_real_det_noise=use_real_det_noise + ) + train_samples.append([train_samp_noisefree,train_samp_noisy]) + small_snr_list = [ifos[j].meta_data['optimal_SNR'] for j in range(len(pars['det']))] + snrs.append(small_snr_list) + #train_samples.append(gen_template(duration,sampling_frequency,pars,ref_geocent_time)[0:2]) + print('Made waveform %d/%d' % (i,N_gen)) + + train_samples_noisefree = np.array(train_samples)[:,0,:] + snrs = np.array(snrs) +# train_pars = np.array(train_pars) + return train_samples_noisefree,train_pars,snrs + + # otherwise we are doing test data + else: + + # generate parameters + pars = priors.sample() + pars['det'] = det + temp = [] + for p in rand_pars: + for q,qi in pars.items(): + if p==q: + temp.append(qi-ref_geocent_time) if p=='geocent_time' else temp.append(qi) + + # inject signal - shift geocent time to correct reference + test_samples_noisefree,test_samples_noisy,injection_parameters,ifos,waveform_generator = gen_template(duration,sampling_frequency, + pars,ref_geocent_time,psd_files) + + # get test sample snr + snr = np.array([ifos[j].meta_data['optimal_SNR'] for j in range(len(pars['det']))]) + + # if not doing PE then return signal data + if not do_pe: + return test_samples_noisy,test_samples_noisefree,np.array([temp]),snr + + try: + bilby.core.utils.setup_logger(outdir=out_dir, label=label) + except Exception as e: + print(e) + pass + + # Initialise the likelihood by passing in the interferometer data (ifos) and + # the waveform generator + phase_marginalization=True + likelihood = bilby.gw.GravitationalWaveTransient( + interferometers=ifos, waveform_generator=waveform_generator, phase_marginalization=phase_marginalization, + priors=priors) + + # save test waveform information + try: + os.mkdir('%s' % (out_dir+'_waveforms')) + except Exception as e: + print(e) + pass + + if params != None: + hf = h5py.File('%s/data_%d.h5py' % (out_dir+'_waveforms',int(label.split('_')[-1])),'w') + for k, v in params.items(): + try: + hf.create_dataset(k,data=v) + except: + pass + + hf.create_dataset('x_data', data=np.array([temp])) + for k, v in bounds.items(): + hf.create_dataset(k,data=v) + hf.create_dataset('y_data_noisefree', data=test_samples_noisefree) + hf.create_dataset('y_data_noisy', data=test_samples_noisy) + hf.create_dataset('rand_pars', data=np.string_(params['rand_pars'])) + hf.create_dataset('snrs', data=snr) + hf.close() + + # look for dynesty sampler option + if np.any([r=='dynesty1' for r in samplers]) or np.any([r=='dynesty2' for r in samplers]) or np.any([r=='dynesty' for r in 
samplers]): + + run_startt = time.time() + # Run sampler dynesty 1 sampler + + result = bilby.run_sampler( + likelihood=likelihood, priors=priors, sampler='dynesty', npoints=1000, nact=50, npool=8, dlogz=0.1, + injection_parameters=injection_parameters, outdir=out_dir+'_'+samplers[-1], label=label, + save='hdf5', plot=True) + run_endt = time.time() + + # save test sample waveform + hf = h5py.File('%s_%s/%s.h5py' % (out_dir,samplers[-1],label), 'w') + hf.create_dataset('noisy_waveform', data=test_samples_noisy) + hf.create_dataset('noisefree_waveform', data=test_samples_noisefree) + + # loop over randomised params and save samples + #for p in inf_pars: + for q,qi in result.posterior.items(): + #if p==q: + name = q + '_post' + print('saving PE samples for parameter {}'.format(q)) + hf.create_dataset(name, data=np.array(qi)) + hf.create_dataset("log_evidence", data=np.array(result.log_evidence)) + hf.create_dataset("log_evidence_error", data=np.array(result.log_evidence_err)) + hf.create_dataset("log_bayes_factor", data=np.array(result.log_bayes_factor)) + hf.create_dataset("log_noise_evidence", data=np.array(result.log_noise_evidence)) + + hf.create_dataset('runtime', data=(run_endt - run_startt)) + hf.close() + + # return samples if not doing a condor run + if condor_run == False: + # Make a corner plot. + result.plot_corner() + # remove unecessary files + png_files=glob.glob("%s_dynesty1/*.png*" % (out_dir)) + hdf5_files=glob.glob("%s_dynesty1/*.hdf5*" % (out_dir)) + pickle_files=glob.glob("%s_dynesty1/*.pickle*" % (out_dir)) + resume_files=glob.glob("%s_dynesty1/*.resume*" % (out_dir)) + pkl_files=glob.glob("%s_dynesty1/*.pkl*" % (out_dir)) + filelist = [png_files,hdf5_files,pickle_files,resume_files,pkl_files] + for file_type in filelist: + for file in file_type: + os.remove(file) + print('finished running pe') + return test_samples_noisy,test_samples_noisefree,np.array([temp]),snr + + run_startt = time.time() + + # look for cpnest sampler option + if np.any([r=='cpnest1' for r in samplers]) or np.any([r=='cpnest2' for r in samplers]) or np.any([r=='cpnest' for r in samplers]): + + # run cpnest sampler 1 + run_startt = time.time() + result = bilby.run_sampler( + likelihood=likelihood, priors=priors, sampler='cpnest', + nlive=2048,maxmcmc=1000, seed=1994, nthreads=10, + injection_parameters=injection_parameters, outdir=out_dir+'_'+samplers[-1], label=label, + save='hdf5', plot=True) + run_endt = time.time() + + # save test sample waveform + hf = h5py.File('%s_%s/%s.h5py' % (out_dir,samplers[-1],label), 'w') + hf.create_dataset('noisy_waveform', data=test_samples_noisy) + hf.create_dataset('noisefree_waveform', data=test_samples_noisefree) + + # loop over randomised params and save samples + #for p in inf_pars: + for q,qi in result.posterior.items(): + #if p==q: + name = q + '_post' + print('saving PE samples for parameter {}'.format(q)) + hf.create_dataset(name, data=np.array(qi)) + hf.create_dataset('runtime', data=(run_endt - run_startt)) + hf.close() + + # return samples if not doing a condor run + if condor_run == False: + # remove unecessary files + png_files=glob.glob("%s_cpnest1/*.png*" % (out_dir)) + hdf5_files=glob.glob("%s_cpnest1/*.hdf5*" % (out_dir)) + other_files=glob.glob("%s_cpnest1/*cpnest_*" % (out_dir)) + resume_files=glob.glob("%s_cpnest1/*.resume*" % (out_dir)) + pkl_files=glob.glob("%s_cpnest1/*.pkl*" % (out_dir)) + filelist = [png_files,hdf5_files,pickle_files,pkl_files,resume_files] + for file_idx,file_type in enumerate(filelist): + for file in file_type: + if 
file_idx == 2: + shutil.rmtree(file) + else: + os.remove(file) + print('finished running pe') + return test_samples_noisy,test_samples_noisefree,np.array([temp]),snr + + n_ptemcee_walkers = 250 + n_ptemcee_steps = 5000 + n_ptemcee_burnin = 4000 + # look for ptemcee sampler option + if np.any([r=='ptemcee1' for r in samplers]) or np.any([r=='ptemcee2' for r in samplers]) or np.any([r=='ptemcee' for r in samplers]): + + # run ptemcee sampler 1 + run_startt = time.time() + result = bilby.run_sampler( + likelihood=likelihood, priors=priors, sampler='ptemcee', +# nwalkers=n_ptemcee_walkers, nsteps=n_ptemcee_steps, nburn=n_ptemcee_burnin, plot=True, ntemps=8, + nsamples=10000, nwalkers=n_ptemcee_walkers, ntemps=8, plot=True, threads=10, + injection_parameters=injection_parameters, outdir=out_dir+'_'+samplers[-1], label=label, + save=False) + run_endt = time.time() + + # save test sample waveform + os.mkdir('%s_%s_h5py_files' % (out_dir,samplers[-1])) + hf = h5py.File('%s_%s_h5py_files/%s.h5py' % (out_dir,samplers[-1],label), 'w') + hf.create_dataset('noisy_waveform', data=test_samples_noisy) + hf.create_dataset('noisefree_waveform', data=test_samples_noisefree) + + # throw away samples with "bad" liklihood values + all_lnp = result.log_likelihood_evaluations + hf.create_dataset('log_like_eval', data=all_lnp) # save log likelihood evaluations +# max_lnp = np.max(all_lnp) +# idx_keep = np.argwhere(all_lnp>max_lnp-12.0).squeeze() +# all_lnp = all_lnp.reshape((n_ptemcee_steps - n_ptemcee_burnin,n_ptemcee_walkers)) + +# print('Identified bad liklihood points') + + # loop over randomised params and save samples + #for p in inf_pars: + for q,qi in result.posterior.items(): + #if p==q: + name = q + '_post' + print('saving PE samples for parameter {}'.format(q)) +# old_samples = np.array(qi).reshape((n_ptemcee_steps - n_ptemcee_burnin,n_ptemcee_walkers)) +# new_samples = np.array([]) +# for m in range(old_samples.shape[0]): +# new_samples = np.append(new_samples,old_samples[m,np.argwhere(all_lnp[m,:]>max_lnp-12.0).squeeze()]) + hf.create_dataset(name, data=np.array(qi)) +# hf.create_dataset(name+'_with_cut', data=np.array(new_samples)) + hf.create_dataset('runtime', data=(run_endt - run_startt)) + hf.close() + + # return samples if not doing a condor run + if condor_run == False: + # remove unecessary files + png_files=glob.glob("%s_ptemcee1/*.png*" % (out_dir)) + hdf5_files=glob.glob("%s_ptemcee1/*.hdf5*" % (out_dir)) + other_files=glob.glob("%s_ptemcee1/*ptemcee_*" % (out_dir)) + resume_files=glob.glob("%s_ptemcee1/*.resume*" % (out_dir)) + pkl_files=glob.glob("%s_ptemcee1/*.pkl*" % (out_dir)) + filelist = [png_files,hdf5_files,other_files,resume_files,pkl_files] + for file_idx,file_type in enumerate(filelist): + for file in file_type: + if file_idx == 2: + shutil.rmtree(file) + else: + os.remove(file) + print('finished running pe') + return test_samples_noisy,test_samples_noisefree,np.array([temp]),snr + + n_emcee_walkers = 250 + n_emcee_steps = 14000 + n_emcee_burnin = 4000 + # look for emcee sampler option + if np.any([r=='emcee1' for r in samplers]) or np.any([r=='emcee2' for r in samplers]) or np.any([r=='emcee' for r in samplers]): + + # run emcee sampler 1 + run_startt = time.time() + result = bilby.run_sampler( + likelihood=likelihood, priors=priors, sampler='emcee', a=1.4, thin_by=10, store=False, + nwalkers=n_emcee_walkers, nsteps=n_emcee_steps, nburn=n_emcee_burnin, + injection_parameters=injection_parameters, outdir=out_dir+samplers[-1], label=label, + save=False,plot=True) + run_endt = 
time.time() + + # save test sample waveform + os.mkdir('%s_h5py_files' % (out_dir+samplers[-1])) + hf = h5py.File('%s_h5py_files/%s.h5py' % ((out_dir+samplers[-1]),label), 'w') + hf.create_dataset('noisy_waveform', data=test_samples_noisy) + hf.create_dataset('noisefree_waveform', data=test_samples_noisefree) + + # throw away samples with "bad" liklihood values + all_lnp = result.log_likelihood_evaluations + hf.create_dataset('log_like_eval', data=all_lnp) # save log likelihood evaluations + max_lnp = np.max(all_lnp) +# idx_keep = np.argwhere(all_lnp>max_lnp-12.0).squeeze() + all_lnp = all_lnp.reshape((n_emcee_steps - n_emcee_burnin,n_emcee_walkers)) + + print('Identified bad liklihood points') + + print + + # loop over randomised params and save samples + #for p in inf_pars: + for q,qi in result.posterior.items(): + #if p==q: + name = q + '_post' + print('saving PE samples for parameter {}'.format(q)) + old_samples = np.array(qi).reshape((n_emcee_steps - n_emcee_burnin,n_emcee_walkers)) + new_samples = np.array([]) + for m in range(old_samples.shape[0]): + new_samples = np.append(new_samples,old_samples[m,np.argwhere(all_lnp[m,:]>max_lnp-12.0).squeeze()]) + hf.create_dataset(name, data=np.array(qi)) + hf.create_dataset(name+'_with_cut', data=np.array(new_samples)) + + hf.create_dataset('runtime', data=(run_endt - run_startt)) + hf.close() + + # return samples if not doing a condor run + if condor_run == False: + # remove unecessary files + png_files=glob.glob("%s_emcee1/*.png*" % (out_dir)) + hdf5_files=glob.glob("%s_emcee1/*.hdf5*" % (out_dir)) + other_files=glob.glob("%s_emcee1/*emcee_*" % (out_dir)) + resume_files=glob.glob("%s_emcee1/*.resume*" % (out_dir)) + pkl_files=glob.glob("%s_emcee1/*.pkl*" % (out_dir)) + filelist = [png_files,hdf5_files,other_files,resume_files,pkl_files] + for file_idx,file_type in enumerate(filelist): + for file in file_type: + if file_idx == 2: + shutil.rmtree(file) + else: + os.remove(file) + print('finished running pe') + return test_samples_noisy,test_samples_noisefree,np.array([temp]),snr + + print('finished running pe') + +def main(args): + + def get_params(): + params = dict( + sampling_frequency=args.samplingfrequency, + duration=args.duration, + N_gen=args.Ngen, + bounds=args.bounds, + fixed_vals=args.fixedvals, + rand_pars=list(args.randpars[0].split(',')), + inf_pars=list(args.infpars[0].split(',')), + ref_geocent_time=args.refgeocenttime, + training=eval(args.training), + do_pe=eval(args.dope), + label=args.label, + out_dir=args.outdir, + seed=args.seed, + samplers=list(args.samplers[0].split(',')), + condor_run=True + + ) + + return params + + params = get_params() + run(sampling_frequency=args.samplingfrequency, + duration=args.duration, + N_gen=args.Ngen, + bounds=args.bounds, + fixed_vals=args.fixedvals, + rand_pars=list(args.randpars[0].split(',')), + inf_pars=list(args.infpars[0].split(',')), + ref_geocent_time=args.refgeocenttime, + training=eval(args.training), + do_pe=eval(args.dope), + label=args.label, + out_dir=args.outdir, + seed=args.seed, + samplers=list(args.samplers[0].split(',')), + condor_run=True, + params=params) + +if __name__ == '__main__': + args = parser() + main(args) + diff --git a/dnn.py b/dnn.py index 8ceb1998753915584905b9ef14e89787e0b3c192..c0f9288de8e0c1fbbfda11d0e50a711144286309 100644 --- a/dnn.py +++ b/dnn.py @@ -1,10 +1,11 @@ import torch import torch.nn as nn import numpy as np +from siren import Sine class DNN(nn.Module): - def __init__(self, input_dim, par_dim, fc_layers = [], conv_layers = [], stride = 0, 
maxpool_size=2,device="cpu", dropout=0.1): + def __init__(self, input_dim, par_dim, fc_layers = [], conv_layers = [], device="cpu", dropout=0.1, num_channels = 1): """ args ---------- @@ -17,30 +18,35 @@ class DNN(nn.Module): fc_layers: list list of the fully connected layers [8, 4, 2] (8 neurons, 4 neurons, 2 neurons) conv_layers: list - list of the convolutional layers [(8, 5), (4, 3)] (8 filters of size 5, 4 filters of size 3) + list of the convolutional layers [(8, 5, 1, 2, 4), (4, 3, 1, 1, 2)] (8 filters of size 5 with stride 1, dilation 2, maxpool 4, 4 filters of size 3 ....) """ super().__init__() # define useful variables of network self.device = device self.input_dim = input_dim self.par_dim = par_dim + self.in_channels = num_channels # convolutional parts self.fc_layers = fc_layers self.conv_layers = conv_layers self.num_conv = len(self.conv_layers) - self.stride = stride - self.maxpool_size = maxpool_size + + if self.num_conv == 0: + self.input_dim = self.in_channels*self.input_dim + + #self.activation = Sine(w0=0.01) + self.activation = nn.LeakyReLU() - self.activation = nn.ReLU() - self.out_activation = nn.Sigmoid()#nn.Softmax() + self.out_activation = nn.LogSoftmax() + #self.out_activation = None#nn.LogSigmoid()#nn.LogSoftmax() self.drop = nn.Dropout(p=dropout) self.small_const = 1e-6 - self.maxpool = nn.MaxPool1d(self.maxpool_size) + - self.lin_network,self.conv_network = self.create_network("o", self.input_dim, self.par_dim, fc_layers = self.fc_layers, conv_layers = self.conv_layers) + self.out_network, self.lin_network,self.conv_network = self.create_network("o", self.input_dim, self.par_dim, fc_layers = self.fc_layers, conv_layers = self.conv_layers) def create_network(self, name, input_dim, output_dim, append_dim=0, mean=True, variance=True, fc_layers=[], conv_layers=[]): @@ -53,20 +59,23 @@ class DNN(nn.Module): """ conv_network = nn.Sequential() lin_network = nn.Sequential() + out_layer = nn.Sequential() layer_out_sizes = [] num_conv = len(conv_layers) num_fc = len(fc_layers) - inchannels = 1 + inchannels = self.in_channels insize = self.input_dim for i in range(num_conv): # padding half filter width padding = int(conv_layers[i][1]/2.) 
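+            # each conv_layers entry is a 5-tuple:
+            # (out_channels, kernel_size, stride, dilation, maxpool_size)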
+ maxpool = nn.MaxPool1d(conv_layers[i][4]) # add convolutional, activation and maxpooling layer - conv_network.add_module("r_conv{}".format(i),module = nn.Conv1d(inchannels, conv_layers[i][0], conv_layers[i][1], stride = self.stride,padding=padding)) + conv_network.add_module("r_conv{}".format(i),module = nn.Conv1d(inchannels, conv_layers[i][0], conv_layers[i][1], stride = conv_layers[i][2],padding=padding, dilation = conv_layers[i][3])) + #conv_network.add_module("batch_norm_conv{}".format(i), module = nn.BatchNorm1d(conv_layers[i][0])) conv_network.add_module("act_r_conv{}".format(i), module = self.activation) - conv_network.add_module("pool_r_conv{}".format(i),module = self.maxpool) + conv_network.add_module("pool_r_conv{}".format(i),module = maxpool) # calulate output size of conv layer - outsize = int(self.conv_out_size(insize, padding, 1, conv_layers[i][1], self.stride)/self.maxpool_size) + outsize = int(self.conv_out_size(insize, padding, conv_layers[i][3], conv_layers[i][1], conv_layers[i][2])/conv_layers[i][4]) layer_out_sizes.append((conv_layers[i][0],outsize)) insize = outsize inchannels = conv_layers[i][0] @@ -80,13 +89,17 @@ class DNN(nn.Module): for i in range(num_fc): # add fc layer and activation lin_network.add_module("r_lin{}".format(i),module=nn.Linear(layer_size, fc_layers[i])) + #lin_network.add_module("batch_norm_lin{}".format(i), module = nn.BatchNorm1d(fc_layers[i])) lin_network.add_module("act_r_lin{}".format(i),module=self.activation) layer_size = fc_layers[i] # output mean and variance of gaussian with size of latent space - lin_network.add_module("output_{}".format(name[0]),module=nn.Linear(layer_size, output_dim)) - lin_network.add_module("act_output_{}".format(name[0]),module=self.out_activation) - return lin_network, conv_network + out_layer.add_module("output_{}".format(name[0]),module=nn.Linear(layer_size, output_dim)) + if self.out_activation is not None: + out_layer.add_module("act_output_{}".format(name[0]),module=self.out_activation) + + + return out_layer, lin_network, conv_network def conv_out_size(self, in_dim, padding, dilation, kernel, stride): @@ -94,18 +107,16 @@ class DNN(nn.Module): def forward(self, y): """forward pass for training""" - y = torch.reshape(y, (-1, 1, self.input_dim)) if self.num_conv > 0 else y - conv = self.conv_network(torch.reshape(y, (-1, 1, self.input_dim))) if self.num_conv > 0 else y - lin_in = torch.flatten(conv,start_dim=1) if self.num_conv > 0 else y# flatten convolutional layers - out = self.lin_network(lin_in) # run fully connected network - return out.reshape(out.size(0)) + conv = self.conv_network(torch.reshape(y, (-1, self.in_channels, self.input_dim))) if self.num_conv > 0 else y + lin_in = torch.flatten(conv,start_dim=1)# flatten convolutional layers + out = self.drop(self.lin_network(lin_in)) # run fully connected network + out2 = self.out_network(out)#.reshape(out.size(0),self.par_dim) + return out2 + #return -self.out_network(out - 4) - 1#.reshape(out.size(0),self.par_dim) def test(model, y): """generating samples when testing the network """ num_data = y.size(0) - x_out = [] - # encode the data into latent space with r1(z,y) - for i in range(num_data): - x_out.append(model.forward(y[i]).cpu().numpy()) + x_out = model.forward(y).cpu().numpy() return np.array(x_out) diff --git a/gen_cbc.py b/gen_cbc.py new file mode 100644 index 0000000000000000000000000000000000000000..c0f92e343ab924a2f8cb04709d869ff1baa7b3c8 --- /dev/null +++ b/gen_cbc.py @@ -0,0 +1,100 @@ +import pickle as pkl +import numpy as np +import 
time +from datetime import datetime +import os +import matplotlib.pyplot as plt +from astropy.coordinates import SkyCoord +from astropy import units as u +from scipy.integrate import dblquad +from dynesty import NestedSampler +import random +from cbc_scripts import run + + +class GenCBC(): + + def __init__(): + pass + + def gen_test(params=params,bounds=bounds,fixed_vals=fixed_vals): + """ Generate testing sample time series and posteriors using Bayesian inference (bilby) + + Parameters + ---------- + params: dict + Dictionary containing run parameters + bounds: dict + Dictionary containing allowed bounds of GW source parameters + fixed_vals: dict + Dictionary containing the fixed values of GW source parameters + """ + + # Check for requried parameters files + if params == None or bounds == None or fixed_vals == None: + print('Missing either params file, bounds file or fixed vals file') + exit() + + # Load parameters files + with open(params, 'r') as fp: + params = json.load(fp) + with open(bounds, 'r') as fp: + bounds = json.load(fp) + with open(fixed_vals, 'r') as fp: + fixed_vals = json.load(fp) + + # Make testing set directory + os.system('mkdir -p %s' % params['test_set_dir']) + + # Add numerical label to samplers + for i in range(len(params['samplers'])): + if i == 0: + continue + else: + params['samplers'][i] = params['samplers'][i]+'1' + + # Make testing samples + for i in range(params['r']): + temp_noisy, temp_noisefree, temp_pars, temp_snr = run(sampling_frequency=params['ndata']/params['duration'], + duration=params['duration'], + N_gen=1, + ref_geocent_time=params['ref_geocent_time'], + bounds=bounds, + fixed_vals=fixed_vals, + rand_pars=params['rand_pars'], + inf_pars=params['inf_pars'], + label=params['bilby_results_label'] + '_' + str(i), + out_dir=params['pe_dir'], + samplers=params['samplers'], + training=False, + seed=params['testing_data_seed']+i, + do_pe=params['doPE'],det=params['det'], + psd_files=params['psd_files'], + use_real_det_noise=params['use_real_det_noise'], + use_real_events=params['use_real_events'], + samp_idx=i) + + signal_test_noisy = temp_noisy + signal_test_noisefree = temp_noisefree + signal_test_pars = temp_pars + signal_test_snr = temp_snr + + print("Generated: %s/%s_%s.h5py ..." 
% (params['test_set_dir'],params['bilby_results_label'],params['run_label'])) + + + # Save generated testing samples in h5py format + hf = h5py.File('%s/%s_%d.h5py' % (params['test_set_dir'],params['bilby_results_label'],i),'w') + for k, v in params.items(): + try: + hf.create_dataset(k,data=v) + hf.create_dataset('rand_pars', data=np.string_(params['rand_pars'])) + except: + pass + hf.create_dataset('x_data', data=signal_test_pars) + for k, v in bounds.items(): + hf.create_dataset(k,data=v) + hf.create_dataset('y_data_noisefree', data=signal_test_noisefree) + hf.create_dataset('y_data_noisy', data=signal_test_noisy) + hf.create_dataset('snrs', data=signal_test_snr) + hf.close() + return 0 diff --git a/gen_data.py b/gen_data.py index 90d3902f0a8ae54ff6c489aa207c6253159c298b..641ea5b60dcd26c954ec54f27977d7a528ac4454 100644 --- a/gen_data.py +++ b/gen_data.py @@ -20,12 +20,10 @@ class GenSin: self.package = package self.xs = np.linspace(0,1,length) self.dtype = dtype - if self.dtype == "sph": - self.transform_samples = self.transform_sph - if self.dtype == "cart2d": - self.transform_samples = self.transform_cart2d - if self.dtype == "cart3d": - self.transform_samples = self.transform_cart3d + self.parameters = [] + + self.maxlon = 2*np.pi + self.maxA = 1 def __len__(self): if self.package == "pytorch": @@ -38,28 +36,18 @@ class GenSin: def gen_sin(self, noise_var = 0.1): - lon = np.random.rand(1)[0]*2*np.pi - lat = np.arcsin(np.random.rand(1)[0]) - - ys = self.sin_model(self.xs, lon, lat) + np.random.normal(loc = 0, scale = noise_var, size = len(self.xs)) - if self.dtype == "sph": - return ys, np.array([lon,lat]) - elif self.dtype == "cart2d": - x = np.cos(lat)*np.cos(lon) - y = np.cos(lat)*np.sin(lon) - #z = np.sin(lat) - return ys, np.array([x,y]) - elif self.dtype == "cart3d": - r = 1. - x = r*np.cos(lat)*np.cos(lon) - y = r*np.cos(lat)*np.sin(lon) - z = r*np.sin(lat) - return ys, np.array([x,y,z]) + lon = np.random.rand(1)[0]*self.maxlon + A = np.random.rand(1)[0]*self.maxA + self.parameters.append([lon,A]) + + ys = self.sin_model(self.xs, lon, A) + np.random.normal(loc = 0, scale = noise_var, size = len(self.xs)) + + return ys, np.array([lon,A]) - def sin_model(self, lon, lat): - ys = np.cos(lat)*np.sin(2.*np.pi*self.xs + lon) - return (ys + 1)*0.5 + def sin_model(self, xs, lon, A): + ys = A*np.sin(2.*np.pi*4*xs + lon) + return ys def change_package(self): if self.package == "pytorch": @@ -69,57 +57,41 @@ class GenSin: self.data = np.array([np.array([self.data[0][i], self.data[1][i]]) for i in range(len(self.data[0]))]) self.package = "pytorch" - - def transform_sph(self, samples): - return samples + def nest_odds(self,data,noise_var): + + def loglikelihood_dynesty(theta): + lon,A = theta # unpack the parameters + # chi-squared (data, sigma and x are global variables defined early on in this notebook) + chisq = np.sum(((y-self.sin_model(self.xs, lon,A))/noise_var)**2) + return -0.5*chisq #- 0.5*np.log(2*np.pi*noise_var*noise_var) - def transform_cart2d(self, samples): - new_samples = [] - new_truths = [] - for test in range(len(samples[0])): - x = samples[0][test][:,0] - y = samples[0][test][:,1] - - alpha = np.arctan2(y,x) - r = np.sqrt(x**2 + y**2) - r[r > 1.] = 1. 
- delta = np.arccos(r) - - new_samples.append(np.array([alpha,delta]).T) - - t_x = samples[1][test][0] - t_y = samples[1][test][1] - t_alpha = np.arctan2(t_y,t_x) - t_delta = np.arccos(np.sqrt(t_x**2 + t_y**2)) - new_truths.append(np.array([t_alpha,t_delta])) - - return np.array(new_samples),np.array(new_truths) - - def transform_cart3d(self, samples): - new_samples = [] - new_truths = [] - for test in range(len(samples[0])): - x = samples[0][test][:,0] - y = samples[0][test][:,1] - z = samples[0][test][:,2] - - lon = np.arctan2(y,x) - r = np.sqrt(x**2 + y**2 + z**2) - lat = np.arcsin(z/r) - - new_samples.append(np.array([lon,lat,r]).T) - - t_x = samples[1][test][0] - t_y = samples[1][test][1] - t_z = samples[1][test][2] - - t_lon = np.arctan2(t_y,t_x) - t_r = np.sqrt(t_x*t_x + t_y*t_y + t_z*t_z) - t_lat = np.arcsin(t_z/t_r) - - new_truths.append(np.array([t_lon, t_lat, t_r])) - - return np.array(new_samples),np.array(new_truths) + # Define our uniform prior via the prior transform. + def prior_transform(theta): + lonprime,latprime = theta + lon = lonprime*self.maxlon # convert back to c + lat = latprime*self.maxA + return (lon, lat) + + nlive = 1024 # number of (initial) live points + bound = 'multi' # use MutliNest algorithm + sample = 'rwalk' # use the random walk to draw new samples + ndims = 2 # two parameters + num_test = len(data) + nestOdds = np.zeros(num_test) + sig_ev = np.zeros(num_test) + noise_ev = np.zeros(num_test) + for i in range(num_test): + y = data[i][0] + sampler = NestedSampler(loglikelihood_dynesty, prior_transform, ndims, + bound=bound, sample=sample, nlive=nlive) + sampler.run_nested(dlogz=0.1) + res = sampler.results + sig_ev[i] = res.logz[-1] + noise_ev[i] = 0.5*np.sum((y/noise_var)**2) + nestOdds[i] = np.exp(res.logz[-1] + 0.5*np.sum((y/noise_var)**2)) # value of logZ + dlogZerrdynesty = res.logzerr[-1] # estimate of the statistcal uncertainty on logZ + print('{}: {}/{} dynesty log-Odds = {:.4e}'.format(time.asctime(),i+1,num_test,np.log(nestOdds[i]))) + return nestOdds, sig_ev, noise_ev def gen_train_data(self, numdata, noise_var = 0.01): @@ -139,12 +111,13 @@ class GenSin: self.data = np.vstack(dataset[:,0]),np.vstack(dataset[:,1]) class GenLine: - def __init__(self, length=25, package = "pytorch"): + def __init__(self, length=25, package = "pytorch", onehot = False): """ Generate a simple stright line y = mx + c, where data is scaled between 0 and 1 """ self.xs = np.linspace(0,1.4,length) self.package = package + self.onehot = onehot def __len__(self): if self.package == "pytorch": @@ -163,10 +136,19 @@ class GenLine: #model = np.zeros(2) #model[np.random.randint(2,size=1)] = 1.0 model = float(np.random.randint(2,size=1)) - ys = model*self.line_model(self.xs, slope, off) + np.random.normal(loc = 0, scale = noise_var, size = len(self.xs)) + if self.onehot: + model_val = [0.0,1.0] if model == 1 else [1.0,0.0] + else: + model_val = [1.0] if model == 1 else [0.0] + + mod = model*self.line_model(self.xs, slope, off) + ys = mod + np.random.normal(loc = 0, scale = noise_var, size = len(self.xs)) + + snr = np.sum(mod**2)/(noise_var**2) + 1 #out = np.zeros(2) - #out[int(model)] = 1.0 - return ys/(max(self.xs)*self.maxm + self.maxc), np.array([model]) + #out[1-int(model)] = -1e40 + + return ys/(max(self.xs)*self.maxm + self.maxc), (np.array(model_val), snr) def line_model(self, x, m, c): return (m*x + c) @@ -177,11 +159,13 @@ class GenLine: for i in range(num_test): mod = data[i][1] y = data[i][0]*(max(self.xs)*self.maxm + self.maxc) + def integrand(c,m): return 
np.exp(-0.5*np.sum(((y - self.xs*m - c)/noise_var)**2) + 0.5*np.sum((y/noise_var)**2)) + trueOdds[i] = dblquad(integrand,0,1,lambda m: 0, lambda m: 1,epsrel = 1e-6, epsabs = 0)[0] print('true Odds = {:.2e}'.format(trueOdds[i])) - return trueOdds + return trueOdds, 0 def nest_odds(self,data,noise_var): @@ -203,17 +187,18 @@ class GenLine: sample = 'rwalk' # use the random walk to draw new samples ndims = 2 # two parameters num_test = len(data) - nestOdds = np.zeros(num_test) + nest_odds = np.zeros(num_test) + nest_errors = np.zeros(num_test) for i in range(num_test): - y = data[i][0] + y = data[i][0]*(max(self.xs)*self.maxm + self.maxc) sampler = NestedSampler(loglikelihood_dynesty, prior_transform, ndims, bound=bound, sample=sample, nlive=nlive) sampler.run_nested(dlogz=0.1) res = sampler.results - nestOdds[i] = np.exp(res.logz[-1] + 0.5*np.sum((y/noise_var)**2)) # value of logZ - dlogZerrdynesty = res.logzerr[-1] # estimate of the statistcal uncertainty on logZ - print('{}: {}/{} dynesty log-Odds = {:.4e}'.format(time.asctime(),i+1,num_test,np.log(nestOdds[i]))) - return nestOdds + nest_odds[i] = np.exp(res.logz[-1] + 0.5*np.sum((y/noise_var)**2)) # value of logZ + nest_errors[i] = res.logzerr[-1] # estimate of the statistcal uncertainty on logZ + print('{}: {}/{} dynesty log-Odds = {:.4e}'.format(time.asctime(),i+1,num_test,np.log(nest_odds[i]))) + return nest_odds, nest_errors def cvae_odds(self,samples,noise_var): @@ -260,6 +245,12 @@ class GenSG: self.xs = np.linspace(0,1,length) self.package = package self.onehot = onehot + self.maxamp = 1.0 + self.maxtau = 0.25 + self.maxt0 = 1.0 + self.maxphi0 = 2.0*np.pi + self.maxf0 = 1.0 + self.parameters = [] def __len__(self): if self.package == "pytorch": @@ -270,28 +261,30 @@ class GenSG: def __getitem__(self, index): return [self.data[index: index+1][0][0],self.data[index: index+1][0][1]] - def gen_SG(self, noise_var = 0.1): - self.maxamp = 1.0 - self.maxtau = 0.25 - self.maxt0 = 1.0 - self.maxphi0 = 2.0*np.pi - self.maxf0 = 1.0 - #A = self.maxamp*(10**np.random.uniform(-5,0, size=1)[0]) - A = self.maxamp*np.random.rand(1)[0] - tau = self.maxtau*np.random.rand(1)[0] - t0 = self.maxt0*np.random.rand(1)[0] - f0 = self.maxf0*np.random.rand(1)[0] - phi0 = self.maxphi0*np.random.rand(1)[0] - #model = np.zeros(2) - #model[np.random.randint(2,size=1)] = 1.0 - sigprob = 0.5 - model = float(np.random.randint(2,size=1)) - if self.onehot: - model_val = [0.0,1.0] if model == 1 else [1.0,0.0] - else: - model_val = 1.0 if model == 1 else 0.0 - ys = model*self.SG_model(self.xs, A, tau, t0, f0, phi0) + np.random.normal(loc = 0, scale = noise_var, size = len(self.xs)) - return ys, np.array(model_val) + def gen_SG(self, noise_var = 0.1,snr_lim = (0,1e6)): + snr = np.inf + while snr < snr_lim[0] or snr > snr_lim[1]: + #A = self.maxamp*(np.log10(np.random.uniform(1.5,6, size=1)[0])) + A = self.maxamp*np.random.rand(1)[0] + tau = self.maxtau*np.random.rand(1)[0] + t0 = self.maxt0*np.random.rand(1)[0] + f0 = self.maxf0*np.random.rand(1)[0] + phi0 = self.maxphi0*np.random.rand(1)[0] + + #model = np.zeros(2) + #model[np.random.randint(2,size=1)] = 1.0 + sigprob = 0.5 + model = float(np.random.randint(2,size=1)) + if self.onehot: + model_val = [0.0,1.0] if model == 1 else [1.0,0.0] + else: + model_val = 1.0 if model == 1 else 0.0 + mod = self.SG_model(self.xs, A, tau, t0, f0, phi0) + ys = model*mod + np.random.normal(loc = 0, scale = noise_var, size = len(self.xs)) + snr = model*np.sum(mod**2)/(noise_var**2) + if np.all([snr >= snr_lim[0], snr <= snr_lim[1]]) or 
model == 0: + self.parameters.append([A,tau,t0,f0,phi0,snr]) + return ys, (np.array(model_val),snr) def SG_model(self, x, A, tau, t0, f0, phi0): fmax = 0.5*len(self.xs) @@ -333,6 +326,7 @@ class GenSG: ndims = 5 # two parameters num_test = len(data) nestOdds = np.zeros(num_test) + nest_errors = np.zeros(num_test) for i in range(num_test): y = data[i][0] sampler = NestedSampler(loglikelihood_dynesty, prior_transform, ndims, @@ -340,9 +334,9 @@ class GenSG: sampler.run_nested(dlogz=0.1) res = sampler.results nestOdds[i] = np.exp(res.logz[-1] + 0.5*np.sum((y/noise_var)**2)) # value of logZ - dlogZerrdynesty = res.logzerr[-1] # estimate of the statistcal uncertainty on logZ + nest_errors[i] = res.logzerr[-1] # estimate of the statistcal uncertainty on logZ print('{}: {}/{} dynesty log-Odds = {:.4e}'.format(time.asctime(),i+1,num_test,np.log(nestOdds[i]))) - return nestOdds + return nestOdds, nest_errors #def cvae_odds(self,samples,noise_var): # num_test = len(samples) @@ -364,9 +358,10 @@ class GenSG: self.data = np.array([np.array([self.data[0][i], self.data[1][i]]) for i in range(len(self.data[0]))]) self.package = "pytorch" - def gen_train_data(self, numdata, noise_var = 0.1): + def gen_train_data(self, numdata, noise_var = 0.1, snr_lim=(0,1e5)): self.start_load = time.time() - dataset = np.array([self.gen_SG(noise_var = noise_var) for i in range(numdata)]) + dataset = np.array([self.gen_SG(noise_var = noise_var, snr_lim = snr_lim) for i in range(numdata)]) + #print(dataset[0:2]) if self.package == "pytorch": self.data = dataset elif self.package == "keras": diff --git a/gen_test_data.py b/gen_test_data.py index b97fe667babdcf9608ebb6b774223d4dd015d485..50f9827ffd2dc25f86bc9c291ffa75002565c5cc 100755 --- a/gen_test_data.py +++ b/gen_test_data.py @@ -19,7 +19,7 @@ import gen_data #import gen_cw import time -def gen_test_data(base_dir,num_test = 100, length=25, datatype="line", noise_var=0.1, odds_type = "num"): +def gen_test_data(base_dir,num_test = 100, length=25, datatype="line", noise_var=0.1, odds_type = "num", snr_lim = 10): if datatype == "sin": test_data = gen_data.GenSin(length=length,package="pytorch") @@ -32,7 +32,7 @@ def gen_test_data(base_dir,num_test = 100, length=25, datatype="line", noise_var test_data.gen_train_data(num_test, noise_var = noise_var) elif datatype == "SG": test_data = gen_data.GenSG(length=length, package = "pytorch") - test_data.gen_train_data(num_test, noise_var = noise_var) + test_data.gen_train_data(num_test, noise_var = noise_var, snr_lim = snr_lim) elif datatype == "gencw": test_data = gen_cw.GenCW() test_data.gen_train_data(num, noise_var = noise_var) @@ -42,7 +42,9 @@ def gen_test_data(base_dir,num_test = 100, length=25, datatype="line", noise_var odds = test_data.num_odds(test_data.data, noise_var) elif odds_type == "nest": odds = test_data.nest_odds(test_data.data,noise_var) - + elif odds_type == "both": + odds_num = test_data.num_odds(test_data.data, noise_var) + odds_nest = test_data.nest_odds(test_data.data,noise_var) save_dir = '{}/{}/ntest{}_xlen{}_noise{}/'.format(basedir,datatype,num_test,length,noise_var) @@ -51,9 +53,22 @@ def gen_test_data(base_dir,num_test = 100, length=25, datatype="line", noise_var with open(os.path.join(save_dir, "modelclass.pkl"),"wb") as f: pickle.dump(test_data,f) - - with open(os.path.join(save_dir, "{}_odds.pkl").format(odds_type),"wb") as f: - pickle.dump(odds,f) + + if odds_type == "both": + with open(os.path.join(save_dir, "nest_odds.pkl"),"wb") as f: + pickle.dump(odds_nest[0],f) + with 
open(os.path.join(save_dir, "nest_odds_error.pkl"),"wb") as f: + pickle.dump(odds_nest[1],f) + with open(os.path.join(save_dir, "num_odds.pkl"),"wb") as f: + pickle.dump(odds_num[0],f) + with open(os.path.join(save_dir, "num_odds_error.pkl"),"wb") as f: + pickle.dump(odds_num[1],f) + + else: + with open(os.path.join(save_dir, "{}_odds.pkl").format(odds_type),"wb") as f: + pickle.dump(odds[0],f) + with open(os.path.join(save_dir, "{}_odds_error.pkl").format(odds_type),"wb") as f: + pickle.dump(odds[1],f) print('{}: saved results to {}'.format(time.asctime(),save_dir)) @@ -62,15 +77,16 @@ if __name__ == "__main__": # data parameters - num_test = 200 - length = 25 + num_test = 300 + length = 128 datatype = "SG" - noise_var = 0.01 + noise_var = 0.1 odds_type = "nest" + snr_lim = (0,1000) basedir = "/home/joseph.bayley/data/cvae_odds/test/" - gen_test_data(basedir, num_test=num_test, length = length, datatype = datatype, noise_var=noise_var, odds_type=odds_type) + gen_test_data(basedir, num_test=num_test, length = length, datatype = datatype, noise_var=noise_var, odds_type=odds_type, snr_lim = snr_lim) diff --git a/load_data_cbc.py b/load_data_cbc.py new file mode 100644 index 0000000000000000000000000000000000000000..b772616e91c1763b5c9f26d1e434805b8abedd47 --- /dev/null +++ b/load_data_cbc.py @@ -0,0 +1,753 @@ +import natsort +import os +import h5py +import numpy as np +#import tensorflow as tf +import torch +import time +from lal import GreenwichMeanSiderealTime +from astropy.time import Time +from astropy import coordinates as coord +from torch.utils.data import DataLoader + +class LoadCBC(DataLoader): + + def __init__(self, input_dir, batch_size = 512, params=None, bounds=None, masks = None, fixed_vals = None, test_set = False, silent = True, chunk_batch = 40, val_set = False): + + self.params = params + self.bounds = bounds + self.masks = masks + self.fixed_vals = fixed_vals + self.input_dir = input_dir + self.test_set = test_set + self.val_set = val_set + self.silent = silent + self.shuffle = False + self.batch_size = batch_size + + #load all filenames + self.get_all_filenames() + # get number of data examples as give them indicies + self.num_data = len(self.filenames)*self.params["tset_split"] + self.num_dets = len(self.params["det"]) + self.indices = np.arange(self.num_data) + + self.chunk_batch = chunk_batch + self.chunk_size = self.batch_size*chunk_batch + self.init_chunk_size = self.chunk_size + self.chunk_iter = 0 + self.max_chunk_num = int(np.floor((self.num_data/self.chunk_size))) + + self.num_epoch_load = 4 + self.epoch_iter = 0 + # will addthis to init files eventually + self.params["noiseamp"] = 1 + + + def __len__(self): + """ number of batches per epoch""" + return(int(self.chunk_batch)) + #return int(np.floor(len(self.filenames)*self.params["tset_split"]/self.batch_size)) + + def load_next_chunk(self): + """ + Loads in one chunk of data where the size if set by chunk_size + """ + # re initialise the chunk size to the original + self.chunk_size = self.init_chunk_size + + if self.test_set or self.val_set: + self.X, self.Y_noisefree, self.Y_noisy, self.snrs = self.load_waveforms(self.filenames, None) + else: + if self.chunk_iter >= self.max_chunk_num or (self.chunk_iter+1)*self.chunk_size> self.num_data: + print("Reached maximum number of chunks, restarting index and shuffling files") + self.chunk_iter = 0 + np.random.shuffle(self.filenames) + + start_load = time.time() + #print("Loading data from chunk {}".format(self.chunk_iter)) + # get the data indices for given chunk + 
temp_chunk_indices = self.indices[self.chunk_iter*self.chunk_size:(self.chunk_iter + 1)*self.chunk_size] + # get the filenames which these data indices live, take set to get single file index + temp_filename_indices = np.array(list(set(np.floor(temp_chunk_indices/self.params["tset_split"])))).astype(int) + # rewrite the data indices as the index within each file + temp_chunk_indices = temp_chunk_indices % self.params["tset_split"] + # if the index falls to zero then split as the file is the next one + temp_chunk_indices_split = np.split(temp_chunk_indices, np.where(np.diff(temp_chunk_indices) < 0)[0] + 1) + + self.X, self.Y_noisefree, self.Y_noisy, self.snrs = self.load_waveforms(self.filenames[temp_filename_indices], temp_chunk_indices_split) + + # change the chunk size to the actual size of data (re initialised at every chunk load) + self.chunk_size = len(self.X) + self.chunk_batch = np.floor(self.chunk_size/self.batch_size) + end_load = time.time() + print("load_time chunk {}: {}".format(self.chunk_iter, end_load - start_load)) + print("num files loaded: {}".format(self.chunk_size)) + + # check for infs or nans in training data + failed = False + """ + if np.any(np.isnan(self.X)) or not np.all(np.isfinite(self.X)): + print("NaN of Inf in parameters") + failed = True + elif np.any(np.isnan(self.Y_noisefree)) or not np.all(np.isfinite(self.Y_noisefree)): + print("NaN or Inf in y data") + failed = True + """ + + if failed is True: + # if infs or nans are present reload and augment this chunk until there are no nans + self.load_next_chunk() + else: + # otherwise iterate to next chunk + self.chunk_iter += 1 + + + def __getitem__(self, index = 0): + """ + get waveforms from data + X: wavefrom parameters + Y: waveform + """ + if self.test_set or self.val_set: + # parameters, waveform noisefree, waveform noisy, snrs + X, Y_noisefree, Y_noisy, snrs = self.X, self.Y_noisefree, self.Y_noisy, self.snrs + if self.test_set: + return np.array(Y_noisefree), np.array(X) + elif self.val_set: + return torch.Tensor(Y_noisefree), torch.Tensor(X) + else: + + start_index = index*self.batch_size #- (self.chunk_iter - 1)*self.chunk_size + end_index = (index+1)*self.batch_size #- (self.chunk_iter - 1)*self.chunk_size + X, Y_noisefree = self.X[start_index:end_index], self.Y_noisefree[start_index:end_index] + return torch.Tensor(Y_noisefree), torch.Tensor(X) + + + def on_epoch_end(self): + """Updates indices after each epoch + """ + self.epoch_iter += 1 + if self.epoch_iter > self.num_epoch_load and not self.test_set and not self.val_set: + self.load_next_chunk() + self.epoch_iter = 0 + + if self.shuffle == True: + np.random.shuffle(self.indices) + + + def get_all_filenames(self): + """ + Get a list of all of the filenames containing the waveforms + """ + # Sort files by number index in file name using natsorted program + self.filenames = np.array(natsort.natsorted(os.listdir(self.input_dir),reverse=False)) + + def load_waveforms(self, filenames, indices = None): + """ + load all the data from the filenames paths + args + --------- + filenames : list + list of filenames as string + indices: list + list of indices to get data from filenames. 
if multiple filenames, must be same length as filenames with the indices from each + + x_data : parameters + y_data_noisefree : waveform without noise + y_data_noisy : waveform with noise + """ + + data={'x_data': [], 'y_data_noisefree': [], 'y_data_noisy': [], 'rand_pars': [], 'snrs': []} + + #idx = np.sort(np.random.choice(self.params["tset_split"],self.params["batch_size"],replace=False)) + + for i,filename in enumerate(filenames): + try: + h5py_file = h5py.File(os.path.join(self.input_dir,filename), 'r') + # dont like the below code, will rewrite at some point + if self.test_set: + data['x_data'].append(h5py_file['x_data']) + data['y_data_noisefree'].append([h5py_file['y_data_noisefree']]) + data['y_data_noisy'].append([h5py_file['y_data_noisy']]) + data['rand_pars'] = h5py_file['rand_pars'] + data['snrs'].append(h5py_file['snrs']) + else: + #print("filname", np.shape(h5py_file["x_data"]), np.shape(h5py_file["y_data_noisefree"]), self.input_dir, filename) + if indices is None: + indices = [np.arange(np.shape(h5py_file["x_data"])[0]), ] + data['x_data'].append(h5py_file['x_data'][indices[i]]) + data['y_data_noisefree'].append(h5py_file['y_data_noisefree'][indices[i]]) + data['rand_pars'] = [i for i in h5py_file['rand_pars']] + data['snrs'].append(h5py_file['snrs'][indices[i]]) + if not self.silent: + print('...... Loaded file ' + os.path.join(self.input_dir,filename)) + except OSError: + print('Could not load requested file') + continue + + + if len(data["x_data"]) == 0 or len(data["y_data_noisefree"]) ==0: + print("No data loaded") + + # concatentation all the x data (parameters) from each of the files + data['x_data'] = np.concatenate(np.array(data['x_data']), axis=0).squeeze() + # concatenate, then transpose the dimensions for keras, from (num_templates, num_dets, num_samples) to (num_templates, num_samples, num_dets) + data['y_data_noisefree'] = np.transpose(np.concatenate(np.array(data['y_data_noisefree']), axis=0),[0,2,1]) + if self.test_set: + data['y_data_noisy'] = np.transpose(np.concatenate(np.array(data['y_data_noisy']), axis=0),[0,2,1]) + data['snrs'] = np.concatenate(np.array(data['snrs']), axis=0).squeeze() + + # temporary code to set a mass range + for i,k in enumerate(data['rand_pars']): + if k.decode('utf-8')=='mass_1': + m1_idx = i + if k.decode('utf-8')=='mass_2': + m2_idx = i + + low_m = 2 + high_m = 102 + + mass_inrange = (data["x_data"][:,m1_idx] > low_m)&(data["x_data"][:,m1_idx] < high_m)&(data["x_data"][:,m2_idx] < high_m)&(data["x_data"][:,m1_idx] > low_m) + + data["x_data"] = data["x_data"][mass_inrange] + data["y_data_noisefree"] = data["y_data_noisefree"][mass_inrange] + if self.test_set: + data["y_data_noisy"] = data["y_data_noisy"][mass_inrange] + try: + data["snrs"] = data["snrs"][mass_inrange] + except: + pass + + + + # convert the parameters from right ascencsion to hour angle + data['x_data'] = convert_ra_to_hour_angle(data['x_data'], self.params, self.params['rand_pars']) + + # convert phi to X=phi+psi and psi on ranges [0,pi] and [0,pi/2] repsectively - both periodic and in radians + for i,k in enumerate(data['rand_pars']): + if k.decode('utf-8')=='psi': + psi_idx = i + if k.decode('utf-8')=='phase': + phi_idx = i + + data['x_data'][:,psi_idx], data['x_data'][:,phi_idx] = psiphi_to_psiX(data['x_data'][:,psi_idx],data['x_data'][:,phi_idx]) + + decoded_rand_pars, par_idx = self.get_infer_pars(data) + + for i,k in enumerate(decoded_rand_pars): + par_min = k + '_min' + par_max = k + '_max' + + # Ensure that psi is between 0 and pi + if par_min == 
'psi_min': + data['x_data'][:,i] = data['x_data'][:,i]/(np.pi/2.0) + #data['x_data'][:,i] = np.remainder(data['x_data'][:,i], np.pi) + elif par_min=='phase_min': + data['x_data'][:,i] = data['x_data'][:,i]/np.pi + else: + data['x_data'][:,i]=(data['x_data'][:,i] - self.bounds[par_min]) / (self.bounds[par_max] - self.bounds[par_min]) + # normalize each parameter by its bounds + #data['x_data'][:,i] = (data['x_data'][:,i] - self.bounds[par_min]) / (self.bounds[par_max] - self.bounds[par_min]) + + if not self.silent: + print('...... {} will be inferred'.format(infparlist)) + + # cast data to floats + data["x_data"] = torch.Tensor(data["x_data"]) + data["x_signal"] = torch.ones(np.shape(data["y_data_noisefree"])) + data["y_data_signal"] = torch.Tensor(data['y_data_noisefree']) + data["y_data_noisy"] = torch.Tensor(data['y_data_noisy']) + data["snrs"] = torch.Tensor(data["snrs"]) + + # randomise phase, time and distance + y_normscale = self.params['y_normscale'] + if not self.test_set: + if not self.val_set: + data["x_data"], time_correction = self.randomise_time(data["x_data"]) + data["x_data"], phase_correction = self.randomise_phase(data["x_data"], data["y_data_signal"]) + data["x_data"], distance_correction = self.randomise_distance(data["x_data"], data["y_data_signal"]) + + # apply phase, time and distance corrections + y_temp_fft = torch.fft.rfft(data["y_data_signal"].permute(0,2,1))*phase_correction*time_correction + data["y_data_signal"] = torch.fft.irfft(y_temp_fft).permute(0,2,1)*distance_correction + del y_temp_fft + + # add noise to the noisefree waveforms and normalise and normalise + + data["y_data_signal"] = (data["y_data_signal"] + self.params["noiseamp"]*torch.normal(size=data["y_data_signal"].size(), mean=0.0, std=1.0))/y_normscale + + data["x_signal"] = torch.ones(data["y_data_signal"].size()[0]) + # create the noise data and labels + data["y_data_noise"] = self.params["noiseamp"]*torch.normal(size=data["y_data_signal"].size(), mean=0.0, std=1.0)/y_normscale + data["x_noise"] = torch.zeros(data["y_data_signal"].size()[0]) + data["snrs_noise"] = torch.zeros(data["y_data_signal"].size()[0]) + # concatenate the two datasets + data["x_data"] = torch.cat((data["x_signal"], data["x_noise"]), dim = 0) + data["y_data"] = torch.cat((data["y_data_signal"], data["y_data_noise"]), dim = 0) + data["snrs"] = torch.cat((data["snrs"], data["snrs_noise"]), dim = 0) + # shuffle the datasets + randinds = np.random.randint(0, len(data["x_data"]),len(data["x_data"])) + return data['x_data'][randinds], data['y_data'][randinds], None, data['snrs'][randinds] + else: + data["y_data_noisy"] = data["y_data_noisy"]/y_normscale + data["x_data"] = torch.ones(data["y_data_noisy"].size()[0]) + """ + data["y_data_noise"] = self.params["noiseamp"]*torch.normal(size=data["y_data_noisy"].size(), mean=0.0, std=1.0)/y_normscale + data["x_noise"] = torch.zeros(data["y_data_noisy"].size()[0]) + data["snrs_noise"] = torch.zeros(data["y_data_noisy"].size()[0]) + data["x_data"] = torch.cat((data["x_data"], data["x_noise"]), dim = 0) + data["y_data_noisy"] = torch.cat((data["y_data_noisy"], data["y_data_noise"]), dim = 0) + data["snrs"] = torch.cat((data["snrs"], data["snrs_noise"]), dim = 0) + """ + return data['x_data'], data['y_data_signal'], data['y_data_noisy'],data['snrs'] + + + def get_infer_pars(self,data): + + # Get rid of encoding + + decoded_rand_pars = [] + for i,k in enumerate(data['rand_pars']): + decoded_rand_pars.append(k.decode('utf-8')) + + # extract inference parameters from all source parameters 
loaded earlier + # choose the indicies of which parameters to infer + par_idx = [] + infparlist = '' + for k in self.params["inf_pars"]: + infparlist = infparlist + k + ', ' + for i,q in enumerate(decoded_rand_pars): + if k==q: + par_idx.append(i) + + return decoded_rand_pars, par_idx + + + def randomise_phase(self, x, y): + """ randomises phase of input parameter x""" + # get old phase and define new phase + old_phase = self.bounds['phase_min'] + x[:,self.masks["phase_mask"]]*(self.bounds['phase_max'] - self.bounds['phase_min']) + new_x = torch.rand(size=old_phase.size()) + new_phase = self.bounds['phase_min'] + new_x*(self.bounds['phase_max'] - self.bounds['phase_min']) + # defice + x[:,self.masks["phase_mask"]] = new_x + #x = torch.gather(torch.cat([torch.reshape(x[:,self.masks["not_phase_mask"]],[-1,x.size()[1]-1]), torch.reshape(new_x,[-1,1])],dim=1),torch.Tensor(self.masks["idx_phase_mask"]),dim=1) + + phase_correction = -1.0*torch.complex(torch.cos(new_phase-old_phase),torch.sin(new_phase-old_phase)) + phase_correction = torch.tile(torch.unsqueeze(phase_correction,dim=1),(1,self.num_dets,int(y.size()[1]/2) + 1)) + return x, phase_correction + + def randomise_time(self, x): + + old_geocent = self.bounds['geocent_time_min'] + x[:,self.masks["geocent_mask"]]*(self.bounds['geocent_time_max'] - self.bounds['geocent_time_min']) + new_x = torch.rand(size=old_geocent.size()) + + new_geocent = self.bounds['geocent_time_min'] + new_x*(self.bounds['geocent_time_max'] - self.bounds['geocent_time_min']) + + x[:,self.masks["geocent_mask"]] = new_x + #x = torch.gather(torch.cat([torch.reshape(x[:,self.masks["not_geocent_mask"]],[-1,x.size()[1]-1]), torch.reshape(new_x,[-1,1])],dim=1),torch.Tensor(self.masks["idx_geocent_mask"]),dim=1) + + fvec = torch.range(0,np.floor(self.params['ndata']/2))/self.params['duration'] + + time_correction = -2.0*np.pi*fvec*(new_geocent-old_geocent) + time_correction = torch.complex(torch.cos(time_correction),torch.sin(time_correction)) + time_correction = torch.tile(torch.unsqueeze(time_correction,dim=1),(1,self.num_dets,1)) + + return x, time_correction + + def randomise_distance(self,x, y): + + old_d = self.bounds['luminosity_distance_min'] + x[:,self.masks["dist_mask"]]*(self.bounds['luminosity_distance_max'] - self.bounds['luminosity_distance_min']) + new_x = torch.rand(size=old_d.size()) + new_d = self.bounds['luminosity_distance_min'] + new_x*(self.bounds['luminosity_distance_max'] - self.bounds['luminosity_distance_min']) + x[:,self.masks["dist_mask"]] = new_x + #x = torch.gather(torch.cat([torch.reshape(x[:,self.masks["not_dist_mask"]],[-1,x.size()[1]-1]), torch.reshape(new_x,[-1,1])],dim=1),torch.Tensor(self.masks["idx_dist_mask"]),dim=1) + dist_scale = torch.tile(torch.unsqueeze(old_d/new_d,dim=1),(1,y.size()[1],1)) + + return x, dist_scale + +def psiphi_to_psiX(psi,phi): + """ + Rescales the psi,phi parameters to a different space + Input and Output in radians + """ + X = np.remainder(psi+phi,np.pi) + psi = np.remainder(psi,np.pi/2.0) + return psi,X + +def psiX_to_psiphi(psi,X): + """ + Rescales the psi,X parameters back to psi, phi + Input and Output innormalised units [0,1] + """ + r = np.random.randint(0,2,size=(psi.shape[0],2)) + psi *= np.pi/2.0 # convert to radians + X *= np.pi # convert to radians + phi = np.remainder(X-psi + np.pi*r[:,0] + r[:,1]*np.pi/2.0,2.0*np.pi).flatten() + psi = np.remainder(psi + np.pi*r[:,1]/2.0, np.pi).flatten() + return psi/np.pi,phi/(np.pi*2.0) # convert back to normalised [0,1] + + + + +############### +## Old Load data 
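A minimal usage sketch of the two reparameterisation helpers above (illustrative values only; psiphi_to_psiX works in radians, psiX_to_psiphi takes and returns normalised values, and the inverse picks one of the degenerate (psi, phi) branches at random, so the round trip is not unique):

    import numpy as np
    psi = np.array([0.3, 2.0])           # polarisation angle, radians
    phi = np.array([5.9, 1.2])           # phase, radians
    psi_w, X = psiphi_to_psiX(psi, phi)  # psi_w in [0, pi/2), X in [0, pi), radians
    # invert: inputs normalised to [0, 1]; outputs are psi/pi and phi/(2*pi)
    psi_n, phi_n = psiX_to_psiphi(psi_w / (np.pi / 2.0), X / np.pi)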
script +############### + +def load_data(params,bounds,fixed_vals,input_dir,inf_pars,test_data=False,silent=False): + """ Function to load either training or testing data. + """ + + # Get list of all training/testing files and define dictionary to store values in files + if type("%s" % input_dir) is str: + dataLocations = ["%s" % input_dir] + else: + print('ERROR: input directory not a string') + exit(0) + + # Sort files by number index in file name using natsorted program + filenames = sorted(os.listdir(dataLocations[0])) + filenames = natsort.natsorted(filenames,reverse=False) + + # If loading by chunks, randomly shuffle list of training/testing filenames + if params['load_by_chunks'] == True and not test_data: + nfiles = np.min([int(params['load_chunk_size']/float(params['tset_split'])),len(filenames)]) + files_idx = np.random.randint(0,len(filenames),nfiles) + filenames= np.array(filenames)[files_idx] + if not silent: + print('...... shuffled filenames since we are loading in by chunks') + + # Iterate over all training/testing files and store source parameters, time series and SNR info in dictionary + data={'x_data': [], 'y_data_noisefree': [], 'y_data_noisy': [], 'rand_pars': [], 'snrs': []} + for filename in filenames: + if test_data: + # Don't load files which are not consistent between samplers + for samp_idx_inner in params['samplers'][1:]: + inner_file_existance = True + dataLocations_inner = '%s_%s' % (params['pe_dir'],samp_idx_inner+'1') + filename_inner = '%s/%s_%d.h5py' % (dataLocations_inner,params['bilby_results_label'],int(filename.split('_')[-1].split('.')[0])) + # If file does not exist, skip to next file + inner_file_existance = os.path.isfile(filename_inner) + if inner_file_existance == False: + break + + if inner_file_existance == False: + print('File not consistent beetween samplers') + continue + try: + data['x_data'].append(h5py.File(dataLocations[0]+'/'+filename, 'r')['x_data'][:]) + data['y_data_noisefree'].append(h5py.File(dataLocations[0]+'/'+filename, 'r')['y_data_noisefree'][:]) + if test_data: + data['y_data_noisy'].append(h5py.File(dataLocations[0]+'/'+filename, 'r')['y_data_noisy'][:]) + data['rand_pars'] = h5py.File(dataLocations[0]+'/'+filename, 'r')['rand_pars'][:] + data['snrs'].append(h5py.File(dataLocations[0]+'/'+filename, 'r')['snrs'][:]) + if not silent: + print('...... 
Loaded file ' + dataLocations[0] + '/' + filename) + except OSError: + print('Could not load requested file') + continue + if np.array(data['y_data_noisefree']).ndim == 3: + data['y_data_noisefree'] = np.expand_dims(np.array(data['y_data_noisefree']),axis=0) + data['y_data_noisy'] = np.expand_dims(np.array(data['y_data_noisy']),axis=0) + data['x_data'] = np.concatenate(np.array(data['x_data']), axis=0).squeeze() + data['y_data_noisefree'] = np.transpose(np.concatenate(np.array(data['y_data_noisefree']), axis=0),[0,2,1]) + if test_data: + data['y_data_noisy'] = np.transpose(np.concatenate(np.array(data['y_data_noisy']), axis=0),[0,2,1]) + data['snrs'] = np.concatenate(np.array(data['snrs']), axis=0) + + # Get rid of weird encoding bullshit + decoded_rand_pars = [] + for i,k in enumerate(data['rand_pars']): + decoded_rand_pars.append(k.decode('utf-8')) + + # convert ra to hour angle + if test_data and len(data['x_data'].shape) == 1: + data['x_data'] = np.expand_dims(data['x_data'], axis=0) + if test_data: + data['x_data'] = convert_ra_to_hour_angle(data['x_data'], params, decoded_rand_pars) + else: + data['x_data'] = convert_ra_to_hour_angle(data['x_data'], params, params['rand_pars']) + + # Normalise the source parameters + #if test_data: + # print(data['x_data']) + # exit() + for i,k in enumerate(decoded_rand_pars): + par_min = k + '_min' + par_max = k + '_max' + + # Ensure that psi is between 0 and pi + if par_min == 'psi_min': + data['x_data'][:,i] = np.remainder(data['x_data'][:,i], np.pi) + + # normalize by bounds + data['x_data'][:,i]=(data['x_data'][:,i] - bounds[par_min]) / (bounds[par_max] - bounds[par_min]) + + # extract inference parameters from all source parameters loaded earlier + idx = [] + infparlist = '' + for k in inf_pars: + infparlist = infparlist + k + ', ' + for i,q in enumerate(decoded_rand_pars): + m = q + if k==m: + idx.append(i) + data['x_data'] = data['x_data'][:,idx] + if not silent: + print('...... {} will be inferred'.format(infparlist)) + + return data['x_data'], data['y_data_noisefree'], data['y_data_noisy'], data['snrs'] + +def convert_ra_to_hour_angle(data, params, pars, single=False): + """ + Converts right ascension to hour angle and back again + """ + + greenwich = coord.EarthLocation.of_site('greenwich') + t = Time(params['ref_geocent_time'], format='gps', location=greenwich) + t = t.sidereal_time('mean', 'greenwich').radian + + # compute single instance + if single: + return t - data + + for i,k in enumerate(pars): + if k == 'ra': + ra_idx = i + + # Check if RA exist + try: + ra_idx + except NameError: + print('...... RA is fixed. Not converting RA to hour angle.') + else: + # Iterate over all training samples and convert to hour angle + for i in range(data.shape[0]): + data[i,ra_idx] = t - data[i,ra_idx] + + return data + +def convert_hour_angle_to_ra(data, params, pars, single=False): + """ + Converts right ascension to hour angle and back again + """ + greenwich = coord.EarthLocation.of_site('greenwich') + t = Time(params['ref_geocent_time'], format='gps', location=greenwich) + t = t.sidereal_time('mean', 'greenwich').radian + + # compute single instance + if single: + return np.remainder(t - data,2.0*np.pi) + + for i,k in enumerate(pars): + if k == 'ra': + ra_idx = i + + # Check if RA exist + try: + ra_idx + except NameError: + print('...... RA is fixed. 
Not converting RA to hour angle.') + else: + # Iterate over all training samples and convert to hour angle + for i in range(data.shape[0]): + data[i,ra_idx] = np.remainder(t - data[i,ra_idx],2.0*np.pi) + + return data + + +def load_samples(params,sampler,pp_plot=False, bounds=None): + """ + read in pre-computed posterior samples + """ + if type("%s" % params['pe_dir']) is str: + # load generated samples back in + dataLocations = '%s_%s' % (params['pe_dir'],sampler+'1') + print('... looking in {} for posterior samples'.format(dataLocations)) + else: + print('ERROR: input samples directory not a string') + exit(0) + + # Iterate over requested number of testing samples to use +# for i in range(params['r']): + i = i_idx = 0 + for samp_idx,samp in enumerate(params['samplers'][1:]): + if samp == sampler: + samp_idx = samp_idx + break + while i_idx < params['r']: + + filename = '%s/%s_%d.h5py' % (dataLocations,params['bilby_results_label'],i) + for samp_idx_inner in params['samplers'][1:]: + inner_file_existance = True + if samp_idx_inner == samp_idx: + inner_file_existance = os.path.isfile(filename) + if inner_file_existance == False: + break + else: + continue + + dataLocations_inner = '%s_%s' % (params['pe_dir'],samp_idx_inner+'1') + filename_inner = '%s/%s_%d.h5py' % (dataLocations_inner,params['bilby_results_label'],i) + # If file does not exist, skip to next file + inner_file_existance = os.path.isfile(filename_inner) + if inner_file_existance == False: + break + + if inner_file_existance == False: + i+=1 + print('File does not exist for one of the samplers') + continue + #if not os.path.isfile(filename): + # print('... unable to find file {}. Exiting.'.format(filename)) + # exit(0) + + print('... Loading test sample -> ' + filename) + data_temp = {} + n = 0 + + # Retrieve all source parameters to do inference on + for q in params['bilby_pars']: + p = q + '_post' + par_min = q + '_min' + par_max = q + '_max' + data_temp[p] = h5py.File(filename, 'r')[p][:] + if p == 'psi_post': + data_temp[p] = np.remainder(data_temp[p],np.pi) + elif p == 'geocent_time_post': + data_temp[p] = data_temp[p] - params['ref_geocent_time'] + # Convert samples to hour angle if doing pp plot + if p == 'ra_post' and pp_plot: + data_temp[p] = convert_ra_to_hour_angle(data_temp[p], params, None, single=True) + data_temp[p] = (data_temp[p] - bounds[par_min]) / (bounds[par_max] - bounds[par_min]) + Nsamp = data_temp[p].shape[0] + n = n + 1 + print('... read in {} samples from {}'.format(Nsamp,filename)) + + # place retrieved source parameters in numpy array rather than dictionary + j = 0 + XS = np.zeros((Nsamp,n)) + for p,d in data_temp.items(): + XS[:,j] = d + j += 1 + print('... put the samples in an array') + + # Append test sample posteriors to existing array of other test sample posteriors + rand_idx_posterior = np.linspace(0,Nsamp-1,num=params['n_samples'],dtype=np.int) + np.random.shuffle(rand_idx_posterior) + rand_idx_posterior = rand_idx_posterior[:params['n_samples']] + + if i == 0: + XS_all = np.expand_dims(XS[rand_idx_posterior,:], axis=0) + else: + XS_all = np.vstack((XS_all,np.expand_dims(XS[rand_idx_posterior,:], axis=0))) + print('... 
appended {} samples to the total'.format(params['n_samples'])) + i+=1; i_idx+=1 + return XS_all + + +def get_params(params, bounds, fixed_vals, params_dir = "./params_files", print_masks=True): + """ + params_file = os.path.join(params_dir,'params.json') + bounds_file = os.path.join(params_dir,'bounds.json') + fixed_vals_file = os.path.join(params_dir,'fixed_vals.json') + + EPS = 1e-3 + + # Load parameters files + with open(params_file, 'r') as fp: + params = json.load(fp) + with open(bounds_file, 'r') as fp: + bounds = json.load(fp) + with open(fixed_vals_file, 'r') as fp: + fixed_vals = json.load(fp) + """ + EPS = 1e-3 + # if doing hour angle, use hour angle bounds on RA + bounds['ra_min'] = convert_ra_to_hour_angle(bounds['ra_min'],params,None,single=True) + bounds['ra_max'] = convert_ra_to_hour_angle(bounds['ra_max'],params,None,single=True) + print('... converted RA bounds to hour angle') + masks = {} + masks["inf_ol_mask"], masks["inf_ol_idx"], masks["inf_ol_len"] = get_param_index(params['inf_pars'],params['bilby_pars']) + masks["bilby_ol_mask"], masks["bilby_ol_idx"], masks["bilby_ol_len"] = get_param_index(params['bilby_pars'],params['inf_pars']) + + + # identify the indices of different sets of physical parameters + masks["vonmise_mask"], masks["vonmise_idx_mask"], masks["vonmise_len"] = get_param_index(params['inf_pars'],params['vonmise_pars']) + masks["gauss_mask"], masks["gauss_idx_mask"], masks["gauss_len"] = get_param_index(params['inf_pars'],params['gauss_pars']) + masks["sky_mask"], masks["sky_idx_mask"], masks["sky_len"] = get_param_index(params['inf_pars'],params['sky_pars']) + masks["ra_mask"], masks["ra_idx_mask"], masks["ra_len"] = get_param_index(params['inf_pars'],['ra']) + masks["dec_mask"], masks["dec_idx_mask"], masks["dec_len"] = get_param_index(params['inf_pars'],['dec']) + masks["m1_mask"], masks["m1_idx_mask"], masks["m1_len"] = get_param_index(params['inf_pars'],['mass_1']) + masks["m2_mask"], masks["m2_idx_mask"], masks["m2_len"] = get_param_index(params['inf_pars'],['mass_2']) + #idx_mask = np.argsort(gauss_idx_mask + vonmise_idx_mask + m1_idx_mask + m2_idx_mask + sky_idx_mask) # + dist_idx_mask) + masks["idx_mask"] = np.argsort(masks["m1_idx_mask"] + masks["m2_idx_mask"] + masks["gauss_idx_mask"] + masks["vonmise_idx_mask"]) # + sky_idx_mask) + masks["dist_mask"], masks["dist_idx_mask"], masks["dis_len"] = get_param_index(params['inf_pars'],['luminosity_distance']) + masks["not_dist_mask"], masks["not_dist_idx_mask"], masks["not_dist_len"] = get_param_index(params['inf_pars'],['mass_1','mass_2','psi','phase','geocent_time','theta_jn','ra','dec','a_1','a_2','tilt_1','tilt_2','phi_12','phi_jl']) + + masks["phase_mask"], masks["phase_idx_mask"], masks["phase_len"] = get_param_index(params['inf_pars'],['phase']) + masks["not_phase_mask"], masks["not_phase_idx_mask"], masks["not_phase_len"] = get_param_index(params['inf_pars'],['mass_1','mass_2','luminosity_distance','psi','geocent_time','theta_jn','ra','dec','a_1','a_2','tilt_1','tilt_2','phi_12','phi_jl']) + + masks["geocent_mask"], masks["geocent_idx_mask"], masks["geocent_len"] = get_param_index(params['inf_pars'],['geocent_time']) + masks["not_geocent_mask"], masks["not_geocent_idx_mask"], masks["not_geocent_len"] = get_param_index(params['inf_pars'],['mass_1','mass_2','luminosity_distance','psi','phase','theta_jn','ra','dec','a_1','a_2','tilt_1','tilt_2','phi_12','phi_jl']) + + masks["xyz_mask"], masks["xyz_idx_mask"], masks["xyz_len"] = 
get_param_index(params['inf_pars'],['luminosity_distance','ra','dec']) + masks["not_xyz_mask"], masks["not_xyz_idx_mask"], masks["not_xyz_len"] = get_param_index(params['inf_pars'],['mass_1','mass_2','psi','phase','geocent_time','theta_jn','a_1','a_2','tilt_1','tilt_2','phi_12','phi_jl']) + + masks["periodic_mask"], masks["periodic_idx_mask"], masks["periodic_len"] = get_param_index(params['inf_pars'],['phase','psi','phi_12','phi_jl']) + masks["nonperiodic_mask"], masks["nonperiodic_idx_mask"], masks["nonperiodic_len"] = get_param_index(params['inf_pars'],['mass_1','mass_2','luminosity_distance','geocent_time','theta_jn','a_1','a_2','tilt_1','tilt_2']) + + masks["idx_xyz_mask"] = np.argsort(masks["xyz_idx_mask"] + masks["not_xyz_idx_mask"]) + masks["idx_dist_mask"] = np.argsort(masks["not_dist_idx_mask"] + masks["dist_idx_mask"]) + masks["idx_phase_mask"] = np.argsort(masks["not_phase_idx_mask"] + masks["phase_idx_mask"]) + masks["idx_geocent_mask"] = np.argsort(masks["not_geocent_idx_mask"] + masks["geocent_idx_mask"]) + masks["idx_periodic_mask"] = np.argsort(masks["nonperiodic_idx_mask"] + masks["periodic_idx_mask"] + masks["ra_idx_mask"] + masks["dec_idx_mask"]) + if print_masks: + print(masks["xyz_mask"]) + print(masks["not_xyz_mask"]) + print(masks["idx_xyz_mask"]) + #masses_len = m1_len + m2_len + print(params['inf_pars']) + print(masks["vonmise_mask"],masks["vonmise_idx_mask"]) + print(masks["gauss_mask"],masks["gauss_idx_mask"]) + print(masks["m1_mask"],masks["m1_idx_mask"]) + print(masks["m2_mask"],masks["m2_idx_mask"]) + print(masks["sky_mask"],masks["sky_idx_mask"]) + print(masks["idx_mask"]) + + return params, bounds, masks, fixed_vals + + +def get_param_index(all_pars,pars,sky_extra=None): + """ + Get the list index of requested source parameter types + """ + # identify the indices of wrapped and non-wrapped parameters - clunky code + mask = [] + idx = [] + + # loop over inference params + for i,p in enumerate(all_pars): + + # loop over wrapped params + flag = False + for q in pars: + if p==q: + flag = True # if inf params is a wrapped param set flag + + # record the true/false value for this inference param + if flag==True: + mask.append(True) + idx.append(i) + elif flag==False: + mask.append(False) + + if sky_extra is not None: + if sky_extra: + mask.append(True) + idx.append(len(all_pars)) + else: + mask.append(False) + + return mask, idx, np.sum(mask) diff --git a/params_files_256_narrowspin_8pars/bounds.json b/params_files_256_narrowspin_8pars/bounds.json new file mode 100644 index 0000000000000000000000000000000000000000..3bbf0579eb5f09e0e1068cda121b7c0baca44d73 --- /dev/null +++ b/params_files_256_narrowspin_8pars/bounds.json @@ -0,0 +1,50 @@ +{ + "psi_min": 0.0, + "mass_1_min": 5.0, + "theta_jn_min": 0.0, + "luminosity_distance_max": 5000.0, + "M_min": 70.0, + "__definition__ra": "right ascension range", + "phi_jl_max": 6.283185307179586, + "theta_jn_max": 3.141592653589793, + "psi_max": 3.141592653589793, + "phi_jl_min": 0.0, + "luminosity_distance_min": 100.0, + "geocent_time_min": 0.15, + "mass_1_max": 100.0, + "__definition__a_1": "a1 range", + "__definition__a_2": "a2 range", + "__definition__luminosity_distance": "luminosity distance range", + "geocent_time_max": 0.35, + "ra_max": 6.283185307179586, + "ra_min": 0.0, + "tilt_1_min": 0.0, + "mass_2_max": 100.0, + "a_1_min": 0.0, + "mass_2_min": 5.0, + "a_1_max": 0.8, + "tilt_1_max": 3.141592653589793, + "a_2_max": 0.8, + "phase_min": 0.0, + "__definition__geocent_time": "time of coalescence range", + "phase_max": 
6.283185307179586, + "a_2_min": 0.0, + "__definition__theta_jn": "inclination angle range", + "tilt_2_max": 3.141592653589793, + "__definition_phijl": "phijl range", + "__definition__phase": "phase range", + "__definition__mass_2": "mass 2 range", + "__definition__mass_1": "mass 1 range", + "__definition__dec": "declination range", + "__definition__M": "total mass range", + "__definition__psi": "psi range", + "phi_12_min": 0.0, + "phi_12_max": 6.283185307179586, + "dec_max": 1.5707963267948966, + "__definition__phi12": "phi12 range", + "tilt_2_min": 0.0, + "__definition__tilt1": "tilt1 range", + "__definition__tilt2": "tilt2 range", + "M_max": 160.0, + "dec_min": -1.5707963267948966 +} \ No newline at end of file diff --git a/params_files_256_narrowspin_8pars/fixed_vals.json b/params_files_256_narrowspin_8pars/fixed_vals.json new file mode 100644 index 0000000000000000000000000000000000000000..0434a30c65841f9752c904aa248015e658b965a8 --- /dev/null +++ b/params_files_256_narrowspin_8pars/fixed_vals.json @@ -0,0 +1,34 @@ +{ + "__definition__phi_jl": "phi_jl fixed value", + "__definition__mc": "chirp mass fixed value", + "__definition__psi": "psi fixed value", + "a_2": 0.0, + "__definition__geocent_time": "time of coalescence fixed value", + "geocent_time": 0.0, + "__definition__luminosity_distance": "luminosity distance fixed value", + "phi_jl": 0.0, + "psi": 0.0, + "__definition__tilt_2": "tilt_2 fixed value", + "phase": 0.0, + "mass_2": 50.0, + "mass_1": 50.0, + "phi_12": 0.0, + "__definition__ra": "right ascension fixed value", + "__definition__theta_jn": "inclination angle fixed value", + "a_1": 0.0, + "mc": null, + "theta_jn": 0.0, + "__definition__tilt1": "tilt_1 fixed value", + "tilt_1": 0.0, + "luminosity_distance": 2000.0, + "__definition__phase": "phase fixed value", + "__definition__mass_2": "m2 fixed value", + "ra": 1.375, + "__definition__a2": "a2 fixed value", + "__definition__mass_1": "m1 fixed value", + "__definition__dec": "declination fixed value", + "dec": -1.2108, + "tilt_2": 0.0, + "__definition__phi_12": "phi_12 fixed value", + "__definition__a1": "a1 fixed value" +} \ No newline at end of file diff --git a/params_files_256_narrowspin_8pars/params.json b/params_files_256_narrowspin_8pars/params.json new file mode 100644 index 0000000000000000000000000000000000000000..7adfe05f8f9e6ef4841cf6939fe1021edc32724d --- /dev/null +++ b/params_files_256_narrowspin_8pars/params.json @@ -0,0 +1,362 @@ +{ + "weighted_pars_factor": 1, + "n_filters_q": [ + 32, + 32, + 32, + 32 + ], + "__definition__pe_dir": "location of test set directory Bayesian PE samples. 
Set to None if you do not want to use posterior samples when training.", + "n_modes": 32, + "__definition__val_dataset_size": "validation dataset total size", + "filter_size_q": [ + 5, + 5, + 5, + 5 + ], + "__definition__vitamin_c": "If True, use new vitamin_c version of code with keras", + "pool_strides_r2": [ + 1, + 2, + 1, + 2 + ], + "__definition__sky_pars": "sky parameters", + "__definition__doPE": "if True then do bilby PE when generating new testing samples", + "__definition__batch_size": " Number training samples shown to neural network per iteration", + "__definition__val_set_dir": "location of validation set directory", + "__definition__bilby_pars": "parameters for bilby to infer", + "gpu_num": 3, + "load_by_chunks": true, + "__definition__y_normscale": "arbitrary normalization factor on all time series waveforms (helps convergence in training)", + "__definition__make_corner_plots": "if True, make corner plots", + "__definition__testing_data_seed": " tensorflow testing random seed number", + "ramp_start": 100, + "__definition__bilby_result_label": "label given to bilby results directory", + "__definition__inf_pars": "parameters to infer", + "make_kl_plot": false, + "__definition__filter_size_r1": "size of convolutional fitlers in r1 network", + "__definition__filter_size_r2": "size of convolutional filters in r2 network", + "__definition__duration": "length of training/validation/test sample time series in seconds", + "testing_data_seed": 44, + "__definition__make_pp_plot": "If True, go through pp plotting function", + "__definition__ramp_end": "ending iteration of KL divergence ramp", + "maxpool_r2": [ + 1, + 2, + 1, + 2 + ], + "maxpool_r1": [ + 1, + 2, + 1, + 2 + ], + "maxpool_q": [ + 1, + 2, + 1, + 2 + ], + "gauss_pars": [ + "luminosity_distance", + "geocent_time", + "theta_jn", + "a_1", + "a_2", + "tilt_1", + "tilt_2", + "ra", + "dec", + "phase", + "psi", + "phi_12", + "phi_jl" + ], + "__definition__corner_labels": "latex source parameter labels for plotting", + "__definition__print_values": "# optionally print loss values every report interval", + "__definition__plot_dir": "output directory to save results plots", + "filter_size_r1": [ + 5, + 5, + 5, + 5 + ], + "__definition__z_dimension": "number of latent space dimensions of model", + "__definition__convert_to_hour_angle": "If True, convert RA to hour angle during trianing and testing", + "__definition__n_filters_q": "number of convolutional filters to use in q network (must be divisible by 3)", + "conv_strides_q": [ + 1, + 1, + 1, + 1 + ], + "validation_data_seed": 45, + "__definition__run_label": "label of run", + "__definition__initial_training_rate": "initial training rate for ADAM optimiser inference model (inverse reconstruction)", + "__definition__pool_strides_r2": "size of max pool stride in r2 network", + "n_filters_r2": [ + 32, + 32, + 32, + 32 + ], + "n_filters_r1": [ + 32, + 32, + 32, + 32 + ], + "batch_norm": false, + "__definition__r": "number of GW timeseries to use for testing.", + "training_data_seed": 43, + "__definition__samplers": "Samplers to use when comparing ML results (vitamin is ML approach) dynesty,ptemcee,cpnest,emcee", + "__definition__load_chunk_size": "Number of training samples to load in at a time.", + "__definition__KL_cycles": " number of cycles to repeat for the KL approximation", + "__definition__n_samples": "number of posterior samples to save per reconstruction upon inference (default 3000)", + "weighted_pars": null, + "__definition__maxpool_r1": "size of maxpooling to use in r1 
network", + "duration": 1.0, + "__definition__real_noise_time_range": "time range to produce real noise samples over", + "__definition__weighted_pars_factor": "factor by which to weight weighted parameters", + "__definition__tot_dataset_size": "total number of training samples available to use", + "__definition_n_KLzsamp": "The number of samples to use to perfrom MonteCarlo integration over z to compute the KL", + "n_KLzsamp": 1, + "use_real_det_noise": false, + "__definition__pool_strides_q": "size of max pool stride to use in q network", + "__definition__load_iteration": "How often to load another chunk of training samples", + "__definition__ndata": "sampling frequency", + "make_corner_plots": true, + "__definition__load_plot_data": "Use plotting data which has already been generated", + "conv_dilations_r2": [ + 1, + 1, + 1, + 1 + ], + "tset_split": 1000, + "__definition__by_channel": "if True, do convolutions as seperate 1-D channels, if False, stack training samples as 2-D images (n_detectors,(duration*sampling_frequency))", + "__definition__num_iterations": "total number of iterations before ending training of model", + "__definition__conv_dilations_q": "size of convolutional dilation to use in q network", + "hour_angle_range": [ + -3.813467684252483, + 2.469671758574231 + ], + "__definition__gpu_num": "gpu number run is running on", + "__definition__hyperparam_n_call": "number of hyperparameter optimization calls", + "ref_geocent_time": 1325029268, + "__definition__save_interval": "number of iterations to save model", + "save_interval": 1000, + "batch_size": 512, + "hyperparam_n_call": 30, + "__definition__training_data_seed": "tensorflow training random seed number", + "convert_to_hour_angle": false, + "test_set_dir": "/home/joseph.bayley/data/CBC/odds_ratio/test_sets/256Hz_full_15par_2det/test_waveforms", + "twod_conv": false, + "ndata": 256, + "__definition__batch_norm": "if true, do batch normalization in all layers of neural network", + "__definition__make_loss_plot": "If True, generate loss plot from previous plot data", + "hyperparam_optim_stop": 500000, + "samplers": [ + "vitamin", + "dynesty" + ], + "ramp": true, + "drate": 0.0, + "__definition__train_set_dir": "location of training set directory", + "__definition__validation_data_seed": "Seed for validation data", + "ramp_end": 200, + "__definition__n_modes": "number of modes in Gaussian mixture model (ideal 7, but may go higher/lower)", + "__definition__parallel_conv": "if true analyse detectors separately in convolutions, otherwise as channels", + "make_pp_plot": false, + "weight_init": "xavier", + "__definition__plot_interval": "number of iterations to plot validation results corner plots", + "__definition__psd_files": "User may specficy their own psd files for each detector. Must be bilby compatible .txt files. If specifying your own files, do so for each detector. If list is empty, default Bilby PSD values will be used for each detector. 
", + "__definition__drate": "dropout rate to use in fully-connected layers", + "n_weights_r2": [ + 1024, + 1024, + 1024 + ], + "conv_dilations_q": [ + 1, + 1, + 1, + 1 + ], + "__definition__extra_decay_factor": "Use an extra decay factor of 0.96 after every load iteration", + "load_plot_data": false, + "__definition_pool_strides_r1": "size of max pool stride to use in r1 network", + "y_normscale": 32, + "print_values": true, + "__definition_maxpool_r2": "size of max pooling to use in r2 network", + "conv_dilations_r1": [ + 1, + 1, + 1, + 1 + ], + "n_weights_q": [ + 1024, + 1024, + 1024 + ], + "__definition__conv_strides_r2": "size of convolutional stride in r2 network", + "__definition__n_weights_r1": "number fully-connected neurons in layers of encoders and decoders in the r1 model", + "__definition__n_weights_r2": "number fully-connected neurons in layers of encoders and decoders in the r2 model", + "__definition__conv_strides_r1": "size of convolutional stride to use in r1 network", + "n_samples": 8000, + "__definition__ref_geocent_time": "reference gps time (not advised to change this)", + "__definition__test_set_dir": "location of test set directory waveforms", + "extra_lr_decay_factor": false, + "val_dataset_size": 1000, + "report_interval": 500, + "rand_pars": [ + "mass_1", + "mass_2", + "luminosity_distance", + "geocent_time", + "phase", + "theta_jn", + "psi" + ], + "real_noise_time_range": [ + 1126051217, + 1137254417 + ], + "inf_pars": [ + "mass_1", + "mass_2", + "luminosity_distance", + "geocent_time", + "phase", + "theta_jn", + "psi" + ], + "__definition__hyperparam_optim_stop": "stopping iteration of hyperparameter optimizer per call", + "__definition__gauss_pars": "parameters that require a truncated gaussian distribution", + "__definition__weighted_pars": "set to None if not using, parameters to weight during training", + "num_iterations": 25001, + "__definition__conv_dilations_r1": "size of convolutional dilation to use in r1 network", + "__definition__conv_dilations_r2": "size of convolutional dilation to use in r2 network", + "__definition__filter_size_q": "size of convolutional filters in q network", + "__definition__figure_sampler_names": "matplotlib figure sampler labels (e.g. 
[ptemcee,CPNest,emcee])", + "train_set_dir": "/home/joseph.bayley/data/CBC/odds_ratio/training_sets_realnoise_1det_7par_256Hz/tset_tot-1000000_split-1000", + "filter_size_r2": [ + 5, + 5, + 5, + 5 + ], + "resume_training": false, + "det": [ + "H1" + ], + "__definition__hyperparam_optim": "optimize hyperparameters for model during training using gaussian process minimization", + "r": 40, + "hyperparam_optim": false, + "__definition__ramp": "# if true, apply linear ramp to KL loss", + "pool_strides_q": [ + 1, + 2, + 1, + 2 + ], + "__definition__conv_strides_q": "size of convolutional stride to use in q network", + "__definition__maxpool_q": "size of max pooling to use in q network", + "conv_strides_r1": [ + 1, + 1, + 1, + 1 + ], + "conv_strides_r2": [ + 1, + 1, + 1, + 1 + ], + "__definition__n_modes_q": "number of modes in Gaussian mixture model for the q distribution", + "doPE": true, + "initial_training_rate": 0.0001, + "__definition__make_paper_plots": "if True, make paper plots", + "make_loss_plot": true, + "__definition__report_interval": "interval at which to save objective function values and optionally print info during training", + "parallel_conv": false, + "__definition___resume_training": "if True, resume training of a model from saved checkpoint", + "__definition__twod_conv": "if true treat the detectors as an extra image dimension otherwise as channels", + "__definition__n_filters_r2": "number of convolutional filters to use in r2 network (must be divisible by 3)", + "__definition__load_by_chunks": "if True, load training samples by a predefined chunk size rather than all at once", + "make_paper_plots": false, + "__definition__n_filters_r1": "number of convolutional filters to use in r1 network (must be divisible by 3)", + "vitamin_c": true, + "vonmise_pars": [], + "figure_sampler_names": [ + "VItamin", + "Dynesty", + "Ptemcee", + "CPNest", + "Emcee" + ], + "load_iteration": 976, + "run_label": "odds_run0_256", + "tot_dataset_size": 1000000, + "sky_pars": [], + "use_real_events": [ + "GW150914" + ], + "plot_interval": 1000, + "Make_sky_plot": false, + "by_channel": true, + "n_modes_q": 1, + "__definition__n_weights_q": "number fully-connected neurons in layers of encoders and decoders in the q model", + "__definition__hour_angle_range": "min and max range of hour angle space", + "__definition__make_sky_plot": "If True, generate sky plots on corner plots", + "z_dimension": 15, + "bilby_results_label": "256Hz_full_15par_2det", + "n_weights_r1": [ + 1024, + 1024, + 1024 + ], + "val_set_dir": "/home/joseph.bayley/data/CBC/odds_ratio/validation_sets_realnoise_1det_8par_256Hz/tset_tot-1000_split-1000", + "__definition__det": "LIGO detectors to perform analysis on (default is 3detector H1,L1,V1)", + "corner_labels": { + "psi": "${\\psi}$", + "geocent_time": "$t_{0}\\,(\\mathrm{seconds})$", + "phase": "${\\phi}$", + "mass_2": "$m_{2}\\,(\\mathrm{M}_{\\odot})$", + "mass_1": "$m_{1}\\,(\\mathrm{M}_{\\odot})$", + "theta_jn": "$\\Theta_{jn}\\,(\\mathrm{rad})$", + "luminosity_distance": "$d_{\\mathrm{L}}\\,(\\mathrm{Mpc})$" + }, + "__definition__rand_pars": "parameters to randomize (those not listed here are fixed otherwise)", + "__definition__make_kl_plot": "If True, go through kl plotting function", + "psd_files": [], + "plot_dir": "/home/joseph.bayley/public_html/cvae_odds/dnn/CBC/odds_run0_256", + "bilby_pars": [ + "mass_1", + "mass_2", + "luminosity_distance", + "geocent_time", + "theta_jn", + "psi" + ], + "pool_strides_r1": [ + 1, + 2, + 1, + 2 + ], + "load_chunk_size": 20000, + 
"__definition__use_real_det_noise": "If True, use real detector noise around reference time", + "__definition__vonmises_pars": "parameters that get wrapped on the 1D parameter", + "__definition__ramp_start": "starting iteration of KL divergence ramp", + "__definition__use_real_events": "list containing real ligo events to be analyzed", + "__definition__weight_init": "[xavier,VarianceScaling,Orthogonal]. Network model weight initialization (default is xavier)", + "pe_dir": "/home/joseph.bayley/data/CBC/odds_ratio/test_sets/256Hz_full_7par_1det/test", + "KL_cycles": 1, + "__definition__tset_split": "number of training samples in each training data file" +} diff --git a/run_cbc_condor.py b/run_cbc_condor.py new file mode 100755 index 0000000000000000000000000000000000000000..b1bd18f9961ffe9f8c9dbfceeb01a9ab878d8764 --- /dev/null +++ b/run_cbc_condor.py @@ -0,0 +1,550 @@ +#!/usr/bin/env python +###################################################################################################################### + +# -- Variational Inference for Gravitational wave Parameter Estimation -- + + +####################################################################################################################### + +import warnings +warnings.filterwarnings("ignore") +import os +from os import path +os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' +import argparse +import numpy as np +#import tensorflow as tf +#from tensorflow.python.util import deprecation +#deprecation._PRINT_DEPRECATION_WARNINGS = False +import scipy.io as sio +import h5py +import sys +from sys import exit +import shutil +import bilby +import matplotlib +matplotlib.use('Agg') +import matplotlib.pyplot as plt +import time +from time import strftime +import corner +import glob +from matplotlib.lines import Line2D +import pandas as pd +import logging.config +from contextlib import contextmanager +import json +from lal import GreenwichMeanSiderealTime +import random + +import skopt +from skopt import gp_minimize, forest_minimize, dump +from skopt.space import Real, Categorical, Integer +from skopt.plots import plot_convergence +from skopt.plots import plot_objective, plot_evaluations +from skopt.utils import use_named_args + +try: + from .cbc_scripts import run, gen_real_noise + #from . import plotting + #from . import vitamin_c_fit as vitamin_c + #from .plotting import prune_samples +except (ModuleNotFoundError, ImportError): + from cbc_scripts import run, gen_real_noise + #import plotting + #import vitamin_c_fit as vitamin_c + #from plotting import prune_samples + +# Check for optional basemap installation +try: + from mpl_toolkits.basemap import Basemap + print("module 'basemap' is installed") +except (ModuleNotFoundError, ImportError): + print("module 'basemap' is not installed") + print("Skyplotting functionality is automatically disabled.") + skyplotting_usage = False +else: + skyplotting_usage = True + try: + from .skyplotting import plot_sky + except: + from skyplotting import plot_sky + +""" Script has several main functions: +1.) Generate training data +2.) Generate testing data +3.) Train model +4.) Test model +5.) Generate samples only given model and timeseries +6.) 
Apply importance sampling to VItamin results +""" + +@contextmanager +def suppress_stdout(): + with open(os.devnull, "w") as devnull: + old_stdout = sys.stdout + sys.stdout = devnull + try: + yield + finally: + sys.stdout = old_stdout + + +def gen_rnoise(params=None,bounds=None,fixed_vals=None): + """ Generate real noise over requested time segment + + Parameters + ---------- + params: dict + Dictionary containing run parameters + bounds: dict + Dictionary containing allowed bounds of GW source parameters + fixed_vals: dict + Dictionary containing the fixed values of GW source parameters + + """ + + # Check for requried parameters files + if params == None or bounds == None or fixed_vals == None: + print('Missing either params file, bounds file or fixed vals file') + exit() + + # Load parameters files + with open(params, 'r') as fp: + params = json.load(fp) + with open(bounds, 'r') as fp: + bounds = json.load(fp) + with open(fixed_vals, 'r') as fp: + fixed_vals = json.load(fp) + + # Make training set directory + os.system('mkdir -p %s' % params['train_set_dir']) + + # Make directory for plots + os.system('mkdir -p %s/latest_%s' % (params['plot_dir'],params['run_label'])) + + print() + print('... Making real noise samples') + print() + + # continue producing noise samples until requested number has been fullfilled + stop_flag = False; idx = time_cnt = 0 + start_file_seg = params['real_noise_time_range'][0] + end_file_seg = None + # iterate until we get the requested number of real noise samples + while idx <= params['tot_dataset_size'] or stop_flag: + + + file_samp_idx = 0 + # iterate until we get the requested number of real noise samples per tset_split files + while file_samp_idx <= params['tset_split']: + real_noise_seg = [start_file_seg+idx+time_cnt, start_file_seg+idx+time_cnt+1] + real_noise_data = np.zeros((int(params['tset_split']),int( params['ndata']*params['duration']))) + + try: + # make the data - shift geocent time to correct reference + real_noise_data[file_samp_idx, :] = gen_real_noise(params['duration'],params['ndata'],params['det'], + params['ref_geocent_time'],params['psd_files'], + real_noise_seg=real_noise_seg + ) + print('Found segment') + except ValueError as e: + print(e) + time_cnt+=1 + continue + print(real_noise_data) + exit() + + print("Generated: %s/data_%d-%d.h5py ..." 
% (params['train_set_dir'],start_file_seg,end_file_seg)) + + # store noise sample information in hdf5 format + hf = h5py.File('%s/data_%d-%d.h5py' % (params['train_set_dir'],start_file_seg,end_file_seg), 'w') + hf.create_dataset('real_noise_samples', data=real_noise_data) + hf.close() + idx+=params['tset_split'] + + # stop training + if idx>params['real_noise_time_range'][1]: + stop_flat = True + exit() + return + +def gen_train(params=None,bounds=None,fixed_vals=None, num_files = 100, start_ind = 0): + """ Generate training samples + + Parameters + ---------- + params: dict + Dictionary containing run parameters + bounds: dict + Dictionary containing allowed bounds of GW source parameters + fixed_vals: dict + Dictionary containing the fixed values of GW source parameters + """ + + # Check for requried parameters files + if params == None or bounds == None or fixed_vals == None: + print('Missing either params file, bounds file or fixed vals file') + exit() + + # Load parameters files + with open(params, 'r') as fp: + params = json.load(fp) + with open(bounds, 'r') as fp: + bounds = json.load(fp) + with open(fixed_vals, 'r') as fp: + fixed_vals = json.load(fp) + + # Make training set directory + os.system('mkdir -p %s' % params['train_set_dir']) + + # Make directory for plots + os.system('mkdir -p %s/latest_%s' % (params['plot_dir'],params['run_label'])) + + print() + print('... Making training set') + print() + + # Iterate over number of requested training samples + for i in range(num_files): + + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': True, + }) + with suppress_stdout(): + # generate training sample source parameter, waveform and snr + signal_train, signal_train_pars,snrs = run(sampling_frequency=params['ndata']/params['duration'], + duration=params['duration'], + N_gen=params['tset_split'], + ref_geocent_time=params['ref_geocent_time'], + bounds=bounds, + fixed_vals=fixed_vals, + rand_pars=params['rand_pars'], + seed=params['training_data_seed']+start_ind+i, + label=params['run_label'], + training=True,det=params['det'], + psd_files=params['psd_files'], + use_real_det_noise=params['use_real_det_noise'], + samp_idx=start_ind + i, params=params) + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': False, + }) + fname = '%s/data_%d-%d.h5py' % (params['train_set_dir'],((start_ind + i)*params['tset_split']),params['tot_dataset_size']) + print("Generated: %s ..." 
% (fname)) + + # store training sample information in hdf5 format + hf = h5py.File('%s' % (fname), 'w') + for k, v in params.items(): + try: + hf.create_dataset(k,data=v) + hf.create_dataset('rand_pars', data=np.string_(params['rand_pars'])) + except: + pass + hf.create_dataset('x_data', data=signal_train_pars) + for k, v in bounds.items(): + hf.create_dataset(k,data=v) + hf.create_dataset('y_data_noisy', data=np.array([])) + hf.create_dataset('y_data_noisefree', data=signal_train) + hf.create_dataset('snrs', data=snrs) + hf.close() + return + +def gen_val(params=None,bounds=None,fixed_vals=None): + """ Generate validation samples + + Parameters + ---------- + params: dict + Dictionary containing run parameters + bounds: dict + Dictionary containing allowed bounds of GW source parameters + fixed_vals: dict + Dictionary containing the fixed values of GW source parameters + """ + + # Check for requried parameters files + if params == None or bounds == None or fixed_vals == None: + print('Missing either params file, bounds file or fixed vals file') + exit() + + # Load parameters files + with open(params, 'r') as fp: + params = json.load(fp) + with open(bounds, 'r') as fp: + bounds = json.load(fp) + with open(fixed_vals, 'r') as fp: + fixed_vals = json.load(fp) + + # Make training set directory + os.system('mkdir -p %s' % params['val_set_dir']) + + # Make directory for plots + os.system('mkdir -p %s/latest_%s' % (params['plot_dir'],params['run_label'])) + + print() + print('... Making validation set') + print() + + + # Iterate over number of requested training samples + for i in range(0,params['val_dataset_size'],params['tset_split']): + + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': True, + }) + with suppress_stdout(): + # generate training sample source parameter, waveform and snr + signal_train, signal_train_pars,snrs = run(sampling_frequency=params['ndata']/params['duration'], + duration=params['duration'], + N_gen=params['val_dataset_size'], + ref_geocent_time=params['ref_geocent_time'], + bounds=bounds, + fixed_vals=fixed_vals, + rand_pars=params['rand_pars'], + seed=params['validation_data_seed']+i, + label=params['run_label'], + training=True,det=params['det'], + psd_files=params['psd_files'], + use_real_det_noise=params['use_real_det_noise']) + logging.config.dictConfig({ + 'version': 1, + 'disable_existing_loggers': False, + }) + fname = "%s/data_%d-%d.h5py ..." 
% (params['val_set_dir'],(i + params['tset_split']),params['val_dataset_size']) + print("Generated: %s" % fname) + + # store training sample information in hdf5 format + hf = h5py.File(fname, 'w') + for k, v in params.items(): + try: + hf.create_dataset(k,data=v) + hf.create_dataset('rand_pars', data=np.string_(params['rand_pars'])) + except: + pass + hf.create_dataset('x_data', data=signal_train_pars) + for k, v in bounds.items(): + hf.create_dataset(k,data=v) + hf.create_dataset('y_data_noisy', data=np.array([])) + hf.create_dataset('y_data_noisefree', data=signal_train) + hf.create_dataset('snrs', data=snrs) + hf.close() + return + +def gen_test(params=None,bounds=None,fixed_vals=None, sig_ind = 0): + """ Generate testing sample time series and posteriors using Bayesian inference (bilby) + + Parameters + ---------- + params: dict + Dictionary containing run parameters + bounds: dict + Dictionary containing allowed bounds of GW source parameters + fixed_vals: dict + Dictionary containing the fixed values of GW source parameters + """ + + # Check for requried parameters files + if params == None or bounds == None or fixed_vals == None: + print('Missing either params file, bounds file or fixed vals file') + exit() + + # Load parameters files + with open(params, 'r') as fp: + params = json.load(fp) + with open(bounds, 'r') as fp: + bounds = json.load(fp) + with open(fixed_vals, 'r') as fp: + fixed_vals = json.load(fp) + + # Make testing set directory + if not os.path.isdir(params["test_set_dir"]): + os.system('mkdir -p %s' % params['test_set_dir']) + + # Add numerical label to samplers + for i in range(len(params['samplers'])): + if i == 0: + continue + else: + params['samplers'][i] = params['samplers'][i]+'1' + + # Make testing samples + temp_noisy, temp_noisefree, temp_pars, temp_snr = run(sampling_frequency=params['ndata']/params['duration'], + duration=params['duration'], + N_gen=1, + ref_geocent_time=params['ref_geocent_time'], + bounds=bounds, + fixed_vals=fixed_vals, + rand_pars=params['rand_pars'], + inf_pars=params['inf_pars'], + label=params['bilby_results_label'] + '_' + str(sig_ind), + out_dir=params['pe_dir'], + samplers=params['samplers'], + training=False, + seed=params['testing_data_seed']+sig_ind, + do_pe=params['doPE'],det=params['det'], + psd_files=params['psd_files'], + use_real_det_noise=params['use_real_det_noise'], + use_real_events=params['use_real_events'], + samp_idx=sig_ind) + + signal_test_noisy = temp_noisy + signal_test_noisefree = temp_noisefree + signal_test_pars = temp_pars + signal_test_snr = temp_snr + fname = "%s/%s_%s.h5py ..." % (params['test_set_dir'],params['bilby_results_label'],sig_ind) + print("Generated: %s ..." 
% (fname)) + + # Save generated testing samples in h5py format + hf = h5py.File(fname,'w') + for k, v in params.items(): + try: + hf.create_dataset(k,data=v) + hf.create_dataset('rand_pars', data=np.string_(params['rand_pars'])) + except: + pass + hf.create_dataset('x_data', data=signal_test_pars) + for k, v in bounds.items(): + hf.create_dataset(k,data=v) + hf.create_dataset('y_data_noisefree', data=signal_test_noisefree) + hf.create_dataset('y_data_noisy', data=signal_test_noisy) + hf.create_dataset('snrs', data=signal_test_snr) + hf.close() + return + +def create_dirs(dirs): + for i in dirs: + if not os.path.isdir(i): + try: + os.makedirs(i) + except: + print >> sys.stderr, "Could not create directory {}".format(i) + sys.exit(1) + print("All directories exist") + +def write_subfile(sub_filename,p,comment): + print(sub_filename) + with open(sub_filename,'w') as f: + f.write('# filename: {}\n'.format(sub_filename)) + f.write('universe = vanilla\n') + f.write('executable = {}\n'.format(p["exec"])) + #f.write('enviroment = ""\n') + f.write('getenv = True\n') + #f.write('RequestMemory = {} \n'.format(p["memory"])) + f.write('log = {}/{}_$(cluster).log\n'.format(p["log_dir"],comment)) + f.write('error = {}/{}_$(cluster).err\n'.format(p["err_dir"],comment)) + f.write('output = {}/{}_$(cluster).out\n'.format(p["output_dir"],comment)) + args = "" + if p["train"]: + args += "--gen_train True " + if p["val"]: + args += "--gen_val True " + if p["test"]: + args += "--gen_test True " + args += "--start_ind $(start_ind) --num_files {} --params_dir {}".format( p["files_per_job"], p["params_dir"]) + f.write('arguments = {}\n'.format(args)) + f.write('accounting_group = ligo.dev.o4.cbc.explore.test\n') + f.write('queue\n') + + +def make_train_dag(params, bounds, fixed_vals, params_dir, run_type = "train"): + + # Load parameters files + with open(params, 'r') as fp: + params = json.load(fp) + with open(bounds, 'r') as fp: + bounds = json.load(fp) + with open(fixed_vals, 'r') as fp: + fixed_vals = json.load(fp) + + p = {} + p["root_dir"] = os.getcwd() + p["condor_dir"] = "{}/condor".format(os.path.join(p["root_dir"],params_dir)) + p["log_dir"] = os.path.join(p["condor_dir"], "log") + p["err_dir"] = os.path.join(p["condor_dir"], "err") + p["output_dir"] = os.path.join(p["condor_dir"], "output") + p["params_dir"] = os.path.join(p["root_dir"],params_dir) + p["exec"] = os.path.join(p["root_dir"], "run_cbc_condor.py") + + p["train"] = False + p["val"] = False + p["test"] = False + p[run_type] = True + p["files_per_job"] = 20 + + for direc in [p["condor_dir"], p["log_dir"], p["err_dir"], p["output_dir"]]: + if not os.path.exists(direc): + os.makedirs(direc) + + comment = "{}_run".format(run_type) + run_sub_filename = os.path.join(p["condor_dir"], "{}.sub".format(comment)) + write_subfile(run_sub_filename,p,comment) + + + dag_filename = "{}/{}.dag".format(p["condor_dir"],comment) + if run_type == "train": + num_files = int(params["tot_dataset_size"]/params["tset_split"]) + num_jobs = int(num_files/p["files_per_job"]) + elif run_type == "val": + num_files = int(params["val_dataset_size"]/params["tset_split"]) + num_jobs = 1 + elif run_type == "test": + num_jobs = params["r"] + + with open(dag_filename,'w') as f: + seeds = [] + for i in range(num_jobs): + seeds.append(random.randint(1,1e9)) + for i in range(num_jobs): + comment = "File_{}".format(i) + uid = seeds[i] + jobid = "{}_{}_{}".format(comment,i,uid) + job_string = "JOB {} {}\n".format(jobid,run_sub_filename) + retry_string = "RETRY {} 1\n".format(jobid) + 
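            # Illustrative sketch (hypothetical jobid, seed and paths) of the three
            # DAG lines this loop writes per job; for a "train" run the VARS
            # start_ind below would be i * files_per_job:
            #   JOB File_0_0_123456789 <params_dir>/condor/train_run.sub
            #   RETRY File_0_0_123456789 1
            #   VARS File_0_0_123456789 start_ind="0"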
if run_type == "train": + vars_string = 'VARS {} start_ind="{}"\n'.format(jobid,int(i*p["files_per_job"])) + else: + vars_string = 'VARS {} start_ind="{}"\n'.format(jobid,int(i)) + f.write(job_string) + f.write(retry_string) + f.write(vars_string) + + + +if __name__ == "__main__": + # If running module from command line + + parser = argparse.ArgumentParser(description='Odds calc') + parser.add_argument("--gen_dag", default=False, help="generate the dag file for training/val/test") + parser.add_argument("--gen_train", default=False, help="generate the training data") + parser.add_argument("--gen_rnoise", default=False, help="generate the real noise samples") + parser.add_argument("--gen_val", default=False, help="generate the validation data") + parser.add_argument("--gen_test", default=False, help="generate the testing data") + parser.add_argument("--num_files", default=1, help="number of files to generate") + parser.add_argument("--start_ind", default=0, help="start file index") + parser.add_argument("--params_dir", type=str, default="./params_files", help="directory containing params files") + parser.add_argument("--run_type", type=str, default="train", help="train, test, val for dag file") + + args = parser.parse_args() + + params_dir = args.params_dir + + # Define default location of the parameters files + params = os.path.join(params_dir, 'params.json') + bounds = os.path.join(params_dir, 'bounds.json') + fixed_vals = os.path.join(params_dir, 'fixed_vals.json') + + if args.gen_dag: + make_train_dag(params, bounds, fixed_vals, params_dir, run_type = args.run_type) + else: + if args.gen_train: + gen_train(params,bounds,fixed_vals, start_ind = int(args.start_ind), num_files = int(args.num_files)) + #if args.gen_rnoise: + # gen_rnoise(params,bounds,fixed_vals) + if args.gen_val: + gen_val(params,bounds,fixed_vals) + if args.gen_test: + gen_test(params,bounds,fixed_vals, sig_ind = int(args.start_ind)) + diff --git a/run_cvae.py b/run_cvae.py index 7a3e53f48427948d3eaac170f0602faec37dbe3f..f12a18ee4f499b7933b626fa95323d6c6d1b7819 100755 --- a/run_cvae.py +++ b/run_cvae.py @@ -135,12 +135,17 @@ def train(model, device, epochs, train_iterator, learning_rate, validation_itera val_losses.append(val_loss) val_kl_losses.append(val_kl_loss) val_lik_losses.append(val_lik_loss) + + # if validation loss is lowest then save the model file + if val_loss < min(val_losses): + torch.save(model, os.path.join(save_dir,"model.pt")) # save the model if epoch % int(epochs/10) == 0 or epoch == epochs - 1: print("Train: Epoch: {}, Training loss: {}, kl_loss: {}, l_loss:{}".format(epoch,train_loss,kl_loss,lik_loss)) print("Validation: Epoch: {}, Training loss: {}, kl_loss: {}, l_loss:{}".format(epoch,val_loss,val_kl_loss,val_lik_loss)) loss_plot(save_dir, train_losses, kl_losses, lik_losses, val_losses, val_kl_losses, val_lik_losses) + if do_test or epoch == epochs - 1: # test plots lat_fig = latent_plot(pars, local_labels.cpu().numpy()) @@ -167,7 +172,11 @@ def run(model,test_it,num_samples = 500,device="cpu"): # Transfer to GPU local_batch, local_labels = local_batch.to(device), local_labels.to(device) - samples,zr_samples,zq_samples = model.test_latent(local_batch,local_labels,num_samples) + labs = np.zeros((len(local_batch), 2)) + for i,v in enumerate(local_labels): + labs[i,int(v.cpu().numpy())] = 1 + + samples,zr_samples,zq_samples = model.test_latent(local_batch,torch.Tensor(labs).to(device),num_samples) #samples = np.transpose(np.array(samples),(1,0,2)) truths = local_labels break @@ -346,9 +355,9 @@ def 
load_train_data(datatype = "line", num_train=1e4, num_val = 1e3, length = 10 validation_data = gen_data.GenSin(length=length, dtype="cart2d",package="pytorch") validation_data.gen_train_data(num_val, noise_var = noise_var) elif datatype == "line": - train_data = gen_data.GenLine(length=length, package = "pytorch") + train_data = gen_data.GenLine(length=length, package = "pytorch", onehot = True) train_data.gen_train_data(num_train, noise_var = noise_var) - validation_data = gen_data.GenLine(length=length, package = "pytorch") + validation_data = gen_data.GenLine(length=length, package = "pytorch", onehot=True) validation_data.gen_train_data(num_val, noise_var = noise_var) elif datatype == "SG": train_data = gen_data.GenSG(length=length, package = "pytorch") @@ -390,6 +399,7 @@ def run_cvae_lin(device,do_train=True,do_test=True, # load precomputed odds with open(os.path.join(test_dir, "nest_odds.pkl"),"rb") as f: nest_odds = pickle.load(f) + true_mod = np.array([float(np.array(i[1]).argmax()) for i in test_data.data]) # define the fully connected and convolutional layers @@ -420,7 +430,7 @@ def run_cvae_lin(device,do_train=True,do_test=True, losses, kl_loss, l_loss, val_loss, val_kl_loss, val_l_loss= train(model, device, train_epochs, train_iterator, learning_rate, validation_iterator, test_iterator, nest_odds, true_mod, num_samples, save_dir,ramp_start=ramp_start,ramp_end=ramp_end) # output loss plot loss_plot(save_dir, losses, kl_loss, l_loss, val_loss, val_kl_loss, val_l_loss) # make the loss plot - torch.save(model, os.path.join(save_dir,"model.pt")) # save the model + #torch.save(model, os.path.join(save_dir,"model.pt")) # save the model else: try: model = torch.load(os.path.join(save_dir,"model.pt")).to(device) @@ -642,26 +652,26 @@ def brute_search(device,length=None,noise_var=None,train_epochs=None,datatype=No if __name__ == "__main__": - os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" + os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7" # make double the default type for tensors torch.set_default_tensor_type(torch.DoubleTensor) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - device = torch.device('cuda', 2) + device = torch.device('cuda', 3) print(device, torch.cuda.get_device_name(device)) num_train = int(5e5) lengths = [25] - datatype = "SG" + datatype = "line" noise_var =[0.1] - num_test = 200 + num_test = 300 num_samples = 100000 # network parameters - train_epochs = [3000] + train_epochs = [2000] ramp_start = 100 ramp_end = 1000 @@ -692,19 +702,19 @@ if __name__ == "__main__": """ run_cvae_lin(device=device, - latent_dim=1, + latent_dim=2, par_dim=num_parameters, - num_fc_layers=1, - num_fc_neurons=8, - num_conv_layers=1, - num_conv_filters=8, - conv_size=4, - maxpool_size=4, + num_fc_layers=4, + num_fc_neurons=128, + num_conv_layers=0, + num_conv_filters=0, + conv_size=0, + maxpool_size=0, stride=1, learning_rate=1e-4, train_epochs=t_ep, length=ln, - batch=256, + batch=500, num_train=num_train, num_test=num_test, datatype=datatype, diff --git a/run_dnn.py b/run_dnn.py index b0f2d39bddbbc2880b9d1ef3cd0c77c330abf056..3d3bd8c97ff0c7955255ce400bff0fb240706373 100755 --- a/run_dnn.py +++ b/run_dnn.py @@ -18,24 +18,113 @@ import emcee #sys.path.append("../gendata/") import gen_data #import gen_cw +import mpmath +mpmath.dps = 50 +import copy - -def adjust_learning_rate(lr, optimizer, epoch, decay_rate = 0.99): +def adjust_learning_rate(lr, optimizer, epoch, decay_rate = 0.99, low_cut = 1e-10): """Sets the learning rate to the initial LR decayed by 10 every 
30 epochs""" - lr = lr * (decay_rate ** (epoch // 5)) + if lr <= low_cut: + lr = lr + else: + lr = lr * (decay_rate ** (epoch // 5)) for param_group in optimizer.param_groups: param_group['lr'] = lr -def train_batch(epoch, model, optimizer, device, batch, labels, ramp=1.0, train = True): +class LogExpm1(torch.autograd.Function): + """ + We can implement our own custom autograd Functions by subclassing + torch.autograd.Function and implementing the forward and backward passes + which operate on Tensors. + """ + + @staticmethod + def forward(ctx, inpt): + """ + In the forward pass we receive a Tensor containing the input and return + a Tensor containing the output. ctx is a context object that can be used + to stash information for backward computation. You can cache arbitrary + objects for use in the backward pass using the ctx.save_for_backward method. + """ + ctx.save_for_backward(inpt) + noise = torch.Tensor([np.double(ipt + mpmath.log(mpmath.expm1(-ipt))) for ipt in inpt.detach().cpu().numpy()]).to(inpt.device) + return noise + + @staticmethod + def backward(ctx, grad_output): + """ + In the backward pass we receive a Tensor containing the gradient of the loss + with respect to the output, and we need to compute the gradient of the loss + with respect to the input. + """ + inpt, = ctx.saved_tensors + grad_input = grad_output.clone() + grad_input *= torch.Tensor([np.double(1 - mpmath.exp(-ipt)/(mpmath.expm1(-ipt))) for ipt in inpt.detach().cpu().numpy()]).to(inpt.device) + return grad_input + + +def loss_func(logits, labels, device="CPU"): + + #sigs = torch.sum(-logits*labels + torch.log(1 + torch.exp(logits))) + #nums = torch.sum(labels, dim = 0) + #noises = 1./nums[0]*torch.sum(labels[:,0]*logits[:,0]) + #sigs = 1./nums[1]*torch.sum(labels[:,1]*logits[:,1]) + + #val = torch.ones(logits[labels == 0].size()).to(device)*50 + #fact = torch.log(1 - torch.exp(logits[labels == 0 ])) + noises = torch.mean(LogExpm1.apply(logits[labels == 0])) + sigs = torch.mean(logits[labels == 1]) + + return -(sigs+noises) + +def loss_func_logsoft(logits, labels, device="CPU"): + """ loss to use with log softmax""" + noises = torch.mean(logits[:,0]*labels[:,1]) + sigs = torch.mean(logits[:,1]*labels[:,0]) + return -(sigs+noises) + +def loss_func_logsigmoid(logits, labels, device="CPU", weights = None): + """ loss to use with log softmax""" + if weights is None: + weightss = torch.ones(logits.size()) + noises = torch.mean(torch.log(1 - torch.exp(logits))*(1-labels)) + sigs = torch.mean(logits*labels*weights) + return -(sigs+noises) + +def loss_func_lin(outs, labels): + + out1 = outs[outs[:,0] < outs[:,1]] + out2 = outs[outs[:,0] >= outs[:,1]] + + labels1 = labels[outs[:,0] < outs[:,1]][:,1] + labels2 = labels[outs[:,0] >= outs[:,1]][:,1] + + #print(out1.size(), out1[:,0].size(), out1[:,1].size(), labels1.size()) + #print(out2.size(), out2[:,0].size(), out2[:,1].size(), labels2.size()) + + l1 = labels1*(out1[:,0] - out1[:,1]) + out1[:,1] - out1[:,0] - torch.log(1 + torch.exp(out1[:,1] - out1[:,0])) + l2 = labels2*(out2[:,0] - out2[:,1]) - torch.log(1 + torch.exp(out2[:,0] - out2[:,1])) + + return -torch.mean(torch.cat((l1,l2))) + +def train_batch(epoch, model, optimizer, device, batch, labels, snrs, ramp=1.0, train = True): model.train(train) if train: optimizer.zero_grad() length = float(batch.size(0)) # calculate r2, q and r1 means and variances out_x = model(batch) - - loss_fn = nn.BCELoss() - loss = loss_fn(out_x.reshape(np.shape(labels)), labels) + + #loss_fn = nn.BCEWithLogitsLoss() + #loss_fn = 
nn.NLLLoss() # use with logsoftmax + #loss_fn = nn.MSELoss() + #par = labels.argmax(1).long() + #loss = loss_fn(out_x.reshape(np.shape(labels)), torch.roll(par,1,dims=1)) + #loss = loss_fn(out_x.reshape(np.shape(labels)), labels) + #loss = loss_func(out_x.reshape(np.shape(labels)), labels, device) + #loss = loss_func_logsoft(out_x.reshape(np.shape(labels)), labels, device) + #loss = loss_func_logsigmoid(out_x.reshape(np.shape(labels)), labels, device, snrs) + loss = loss_func_lin(out_x.reshape(np.shape(labels)), labels) if train: loss.backward() @@ -44,7 +133,7 @@ def train_batch(epoch, model, optimizer, device, batch, labels, ramp=1.0, train return loss.item() -def train(model, device, epochs, train_iterator, learning_rate, validation_iterator, test_iterator, nest_odds, true_mod, num_samples, save_dir, decay_start=100, decay_rate = 0.99): +def train(model, device, epochs, train_iterator, learning_rate, validation_iterator, test_iterator, nest_odds, true_mod, num_samples, save_dir, decay_start=100, decay_rate = 0.99, low_cut = 1e-10): optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate) @@ -56,30 +145,33 @@ def train(model, device, epochs, train_iterator, learning_rate, validation_itera true_mods_last = [] for epoch in range(epochs): - if epoch > decay_start: - adjust_learning_rate(learning_rate, optimizer, epoch, decay_rate=decay_rate) + adjust_learning_rate(learning_rate, optimizer, epoch, decay_rate=decay_rate, low_cut = low_cut) # Training - for local_batch, local_labels in train_iterator: + for local_batch, (local_labels, local_snr) in train_iterator: # Transfer to GPU - local_batch, local_labels = local_batch.to(device), local_labels.to(device) - train_loss = train_batch(epoch, model, optimizer, device, local_batch,local_labels, train=True) + local_batch, local_labels, local_snr = local_batch.to(device), local_labels.to(device), local_snr.to(device) + train_loss = train_batch(epoch, model, optimizer, device, local_batch,local_labels, local_snr, train=True) train_losses.append(train_loss) # validation - for val_batch, val_labels in validation_iterator: + for val_batch, (val_labels, val_snr) in validation_iterator: # Transfer to GPU - val_batch, val_labels = val_batch.to(device), val_labels.to(device) - val_loss = train_batch(epoch, model, optimizer, device, val_batch, val_labels, train=False) + val_batch, val_labels, val_snr = val_batch.to(device), val_labels.to(device), val_snr.to(device) + val_loss = train_batch(epoch, model, optimizer, device, val_batch, val_labels, val_snr, train=False) val_losses.append(val_loss) if epoch % int(epochs/10) == 0: print("Epoch: {}, Training loss: {}, Validation loss: {}".format(epoch,train_loss, val_loss)) - + print(local_batch.dtype) loss_plot(save_dir, train_losses, val_losses) est_odds,_ = run(model,test_iterator,device=device) + odds_fig_full = odds_plot_full(nest_odds,est_odds) + odds_fig_full.savefig('{}/odds_full_epoch{}.png'.format(save_dir,epoch)) odds_fig = odds_plot(nest_odds,est_odds,true_mod,num_samples) odds_fig.savefig('{}/odds_epoch{}.png'.format(save_dir,epoch)) + plt.close(odds_fig) + plt.close(odds_fig_full) if epoch > epochs - 20: odds_last_est.extend(est_odds) @@ -90,12 +182,23 @@ def train(model, device, epochs, train_iterator, learning_rate, validation_itera odds_fig.savefig('{}/odds_last_20epochs.png'.format(save_dir,epoch)) return train_losses,val_losses -def dnn_odds(outputs): +def dnn_odds(outputs, labels): outs = [] - for test in outputs: - #num = test[1]/(1-test[1]) - #den = test[0]/(1-test[0]) - odds = 
test[0]/(1-test[0])#num/den + done = 0 + for i,test in enumerate(outputs): + #odds = test[0]/(1-test[0]) # sigmoid out + #odds = np.double(1./mpmath.expm1(-test[0])) # mpmath version + #odds = np.exp(test[0] - test[1]) + #odds = np.double(mpmath.expm1(test[0])) + odds = np.double(mpmath.exp(test[0] - test[1])) + #print("modshape",np.shape(labels[i])) + if done == 0 and labels[i][1] == 0: + print(test[0], test[1], odds, labels[i]) + done = 1 + if done == 1 and labels[i][1] == 1: + print(test[0], test[1],odds, labels[i]) + done = 2 + outs.append(odds) return np.array(outs) @@ -111,9 +214,11 @@ def run(model,test_it,device="cpu"): for local_batch, local_labels in test_it: # Transfer to GPU local_batch, local_labels = local_batch.to(device), local_labels.to(device) - + #sig = torch.nn.Sigmoid() + print(local_batch.size()) outputs = model.test(local_batch) - odds = dnn_odds(outputs) + outputs = outputs.reshape((np.shape(outputs)[0], np.shape(outputs)[-1])) + odds = dnn_odds(torch.Tensor(outputs).numpy(),local_labels) #samples = np.transpose(np.array(samples),(1,0,2)) truths = local_labels break @@ -126,6 +231,9 @@ def loss_plot(save_dir, loss, val_loss, space = 10): ax[0].plot(xs, loss, label="loss", color="C0", alpha=0.5) ax[0].plot(xs, val_loss, label="val loss", color="C1", ls="--", lw=3, alpha=0.5) + + ax[1].plot(xs, loss, label="loss", color="C0", alpha=0.5) + ax[1].plot(xs, val_loss, label="val loss", color="C1", ls="--", lw=3, alpha=0.5) ax[1].set_xlabel("Epoch") ax[0].legend(fontsize=20) @@ -133,6 +241,7 @@ def loss_plot(save_dir, loss, val_loss, space = 10): ax[0].grid() ax[1].grid() ax[1].set_yscale("log") + ax[1].set_xscale("log") #ax.set_yscale("symlog") fig.savefig(os.path.join(save_dir, "loss.png")) plt.close(fig) @@ -147,37 +256,52 @@ def odds_plot(trueOdds,estOdds,trueMod,num_samples=1000): # identify the indices of where any results are zero or infinite k = np.argwhere((trueOdds==0.0) + (estOdds==0.0) + (trueOdds>num_samples) + (estOdds>num_samples)) + true_odds = copy.copy(trueOdds) + est_odds = copy.copy(estOdds) # cap the true and est odds to the cvae limits j = np.argwhere(trueOdds>num_samples) - trueOdds[j] = num_samples - 1.0 + true_odds[j] = num_samples - 1.0 j = np.argwhere(trueOdds==0.0) - trueOdds[j] = 1.0/num_samples + true_odds[j] = 1.0/num_samples j = np.argwhere(estOdds==0.0) - estOdds[j] = 1.0/num_samples + est_odds[j] = 1.0/num_samples j = np.argwhere(estOdds>num_samples) - estOdds[j] = num_samples - 1.0 + est_odds[j] = num_samples - 1.0 # identify the indices of results that fall in the good zone - noise_idx = np.isfinite(trueOdds)*np.isfinite(estOdds)*(trueMod<1.0)*(trueOdds>0.0)*(estOdds>0.0) - signal_idx = np.isfinite(trueOdds)*np.isfinite(estOdds)*(trueMod>0.0)*(trueOdds>0.0)*(estOdds>0.0) + noise_idx = np.isfinite(true_odds)*np.isfinite(est_odds)*(trueMod<1.0)*(true_odds>0.0)*(est_odds>0.0) + signal_idx = np.isfinite(true_odds)*np.isfinite(est_odds)*(trueMod>0.0)*(true_odds>0.0)*(est_odds>0.0) - plt.plot(np.log(trueOdds[noise_idx]),np.log(estOdds[noise_idx]),'r.',label='noise') - plt.plot(np.log(trueOdds[signal_idx]),np.log(estOdds[signal_idx]),'b.',label='signal') - plt.plot(np.log(trueOdds[k]),np.log(estOdds[k]),'ko',label='out of bounds') + plt.plot(np.log(true_odds[noise_idx]),np.log(est_odds[noise_idx]),'r.',label='noise') + plt.plot(np.log(true_odds[signal_idx]),np.log(est_odds[signal_idx]),'b.',label='signal') + plt.plot(np.log(true_odds[k]),np.log(est_odds[k]),'ko',label='out of bounds') plt.plot(np.log([1.0/num_samples, 
num_samples]),np.log([1.0/num_samples, num_samples]),'-k',alpha=0.5) plt.plot(np.log(trues),np.log(trues) + ovar,'-r',alpha=0.5) plt.plot(np.log(trues),np.log(trues) - ovar,'-r',alpha=0.5) plt.xlim([np.log(1.0/num_samples)-1, np.log(num_samples)+1]) plt.ylim([np.log(1.0/num_samples)-1, np.log(num_samples)+1]) - plt.xlabel('true Odds') - plt.ylabel('estimated Odds') + plt.xlabel('true Log Odds') + plt.ylabel('estimated Log Odds') plt.legend() + plt.grid() + return fig + +def odds_plot_full(true_odds,est_odds): + + fig,ax = plt.subplots(figsize = (20,10)) + + ax.plot(np.log(true_odds), np.log(est_odds), ".") + ax.plot([-10,60],[-10,60],"r") + ax.set_xlim([-10,60]) + ax.set_ylim([-10,60]) + ax.set_xlabel("Numerical odds", fontsize = 20) + ax.set_ylabel("NN odds", fontsize = 20) return fig def test_model(save_dir, model, - run_data, + run_iterator, nestOdds, trueMod, datatype = "line", @@ -187,13 +311,14 @@ def test_model(save_dir, noise_var=0.1 ): - run_iterator = DataLoader(run_data, batch_size=num_test) + #run_iterator = DataLoader(run_data, batch_size=num_test, shuffle = False) samples,truths = run(model,run_iterator,device=device) estOdds = samples#dnn_odds(samples) test_data = os.path.join(save_dir,"test_data") test_data_plots = os.path.join(save_dir,"test_data_plots") test_plots = os.path.join(save_dir,"test_plots") + for d in [test_data, test_data_plots,test_plots]: if not os.path.isdir(d): os.makedirs(d) @@ -207,40 +332,42 @@ def test_model(save_dir, for ind in range(num_test): fig, ax = plt.subplots() - ax.plot(run_data.data[ind][0],label = [val for val in run_data.data[ind][1]]) + ax.plot(run_iterator.dataset.data[ind][0],label = [val for val in run_iterator.dataset.data[ind][1]]) ax.set_ylim([-1.2,1.2]) - fig.savefig(os.path.join(test_data_plots,"test_{}.png".format(ind))) + fig.savefig(os.path.join(test_data_plots,"test_{}_s{}.png".format(ind, run_iterator.dataset.data[ind][1]))) plt.close(fig) with open(os.path.join(test_data,"test_{}.pkl".format(ind)),"wb") as f: - pickle.dump(run_data.data[ind],f) + pickle.dump(run_iterator.dataset.data[ind],f) -def run_cvae_lin(device, - do_train=True, - do_test=True, - par_dim=2, - num_conv_layers=2, - num_fc_layers=2, - num_conv_filters=8, - conv_size=5, - pool_size=2, - stride=1, - num_fc_neurons=8, - learning_rate=1e-4, - decay_start = 100, - decay_rate = 0.99, - train_epochs=500, - num_test=1000, - num_samples=1000, - noise_var=0.1, - length=25, - batch=256, - num_train=1e4, - datatype="line", - dropout=0.1, - basedir=None, - test_dir = None, - ): +def run_dnn_lin(device, + do_train=True, + do_test=True, + par_dim=2, + num_conv_layers=2, + num_fc_layers=2, + num_conv_filters=8, + conv_size=5, + pool_size=2, + stride=1, + num_fc_neurons=8, + learning_rate=1e-4, + decay_start = 100, + decay_rate = 0.99, + low_cut = 1e-10, + train_epochs=500, + num_test=1000, + num_samples=1000, + noise_var=0.1, + length=25, + batch=256, + num_train=1e4, + datatype="line", + dropout=0.1, + basedir=None, + test_dir = None, + datapp = "", + snrlim = (0,1e6)): if datatype == "sin": train_data = gen_data.GenSin(length=length,package="pytorch") @@ -253,22 +380,24 @@ def run_cvae_lin(device, validation_data = gen_data.GenSin(length=length, dtype="cart2d",package="pytorch") validation_data.gen_train_data(batch, noise_var = noise_var) elif datatype == "line": - train_data = gen_data.GenLine(length=length, package = "pytorch") + train_data = gen_data.GenLine(length=length, package = "pytorch",onehot=True) train_data.gen_train_data(num_train, noise_var = noise_var) - 
validation_data = gen_data.GenLine(length=length, package = "pytorch") - validation_data.gen_train_data(batch, noise_var = noise_var) + validation_data = gen_data.GenLine(length=length, package = "pytorch",onehot=True) + validation_data.gen_train_data(10*batch, noise_var = noise_var) elif datatype == "SG": - train_data = gen_data.GenSG(length=length, package = "pytorch",onehot=False) - train_data.gen_train_data(num_train, noise_var = noise_var) - validation_data = gen_data.GenSG(length=length, package = "pytorch",onehot=False) - validation_data.gen_train_data(10*batch, noise_var = noise_var) + train_data = gen_data.GenSG(length=length, package = "pytorch",onehot=True) + train_data.gen_train_data(num_train, noise_var = noise_var, snr_lim = snrlim) + validation_data = gen_data.GenSG(length=length, package = "pytorch",onehot=True) + validation_data.gen_train_data(10*batch, noise_var = noise_var,snr_lim = snrlim) elif datatype == "gencw": train_data = gen_cw.GenCW() train_data.gen_train_data(num, noise_var = noise_var) + print("train_data_shape",np.shape(train_data.data)) train_iterator = DataLoader(train_data, batch_size=batch, shuffle=True) validation_iterator = DataLoader(validation_data, batch_size=batch, shuffle=True) + print("Test Dir: {}".format(test_dir)) with open(os.path.join(test_dir, "modelclass.pkl"),"rb") as f: test_data = pickle.load(f) @@ -279,6 +408,9 @@ def run_cvae_lin(device, nest_odds = pickle.load(f) true_mod = np.array([float(i[1][1]) for i in test_data.data]) + print("truths",np.shape(true_mod)) + #true_mod = np.array([i[1] for i in test_data.data]) + #true_mod = np.array([(float(i[1][0]), float(i[1][1])) for i in test_data.data]) fc_layers = [num_fc_neurons for i in range(num_fc_layers)] conv_layers = [(num_conv_filters, conv_size) for i in range(num_conv_layers)] @@ -290,12 +422,13 @@ def run_cvae_lin(device, fc_layers = fc_layers, conv_layers=conv_layers, maxpool_size=pool_size, - stride=stride).to(device) + stride=stride, + dropout=dropout).to(device) - save_dir = '{}/{}/ntrain{}_xlen{}_noise{}/epoch{}_lr{}_dec{}_{}_batch{}_nconv{}_nconvn{}_convsize{}_pool{}_stride{}_drop{}_nfc{}_nfcn{}'.format( - basedir,datatype, - num_train,length,noise_var, - train_epochs,learning_rate,decay_start,decay_rate,batch, + save_dir = '{}/{}{}/ntrain{}_xlen{}_noise{}_snrlim{}_{}/epoch{}_lr{}_dec{}_cut{}_{}_batch{}_nconv{}_nconvn{}_convsize{}_pool{}_stride{}_drop{}_nfc{}_nfcn{}_leakyRelu_linout'.format( + basedir,datatype, datapp, + num_train,length,noise_var,snrlim[0],snrlim[1], + train_epochs,learning_rate,decay_start,decay_rate,low_cut,batch, num_conv_layers,num_conv_filters,conv_size,pool_size,stride,dropout, num_fc_layers,num_fc_neurons) @@ -304,7 +437,7 @@ def run_cvae_lin(device, # train the network if do_train: - losses, val_loss= train(model, device, train_epochs, train_iterator, learning_rate, validation_iterator, test_iterator, nest_odds, true_mod, num_samples, save_dir, decay_rate = decay_rate, decay_start=decay_start) + losses, val_loss= train(model, device, train_epochs, train_iterator, learning_rate, validation_iterator, test_iterator, nest_odds, true_mod, num_samples, save_dir, decay_rate = decay_rate, decay_start=decay_start, low_cut = low_cut) loss_plot(save_dir, losses, val_loss ) # make the loss plot torch.save(model, os.path.join(save_dir,"model.pt")) # save the model else: @@ -315,7 +448,7 @@ def run_cvae_lin(device, exit(0) # test the model - test_model(save_dir, model, test_data, nest_odds, true_mod, length=length, num_test=num_test, num_samples=num_samples, 
datatype=datatype) + test_model(save_dir, model, test_iterator, nest_odds, true_mod, length=length, num_test=num_test, num_samples=num_samples, datatype=datatype) print('{}: saved results to {}'.format(time.asctime(),save_dir)) @@ -323,47 +456,52 @@ if __name__ == "__main__": # data parameters - num_train = int(1e5) - num_test = 200 - batch = 1000 + num_train = int(1e6) + num_test = 300 + batch = 256 length = 25 datatype = "SG" noise_var = 0.1 + snrlim = (0,1000) # network parameters - train_epochs = 700 - learning_rate = 1e-7 - decay_start = 50 - decay_rate = 5e-1 + train_epochs = 10000 + learning_rate = 1e-6 + decay_start = 500 + decay_rate = 0.997 + low_cut = 1e-11 # network layers - num_parameters = 1 + num_parameters = 2 # dnn network - num_hidden = [3]#[0,1,2] - num_neurons = [32]#[8,16,32] - dropout = [0.2] + num_hidden = [4]#[0,1,2] + num_neurons = [256]#[8,16,32] + dropout = [0.0] - conv_size = [8]#[3,5] - num_conv_filters = [8] - num_conv = [1] + conv_size = [16]#[3,5] + num_conv_filters = [32] + num_conv = [3] maxpool_size = [1] stride = 1 # train or test or both do_train = True do_test = True + #datapp = "_log1.5_6A" + datapp = ""#"_uniformA" basedir = '/home/joseph.bayley/public_html/cvae_odds/dnn/' test_dir = "/home/joseph.bayley/data/cvae_odds/test/" - test_dir = '{}/{}/ntest{}_xlen{}_noise{}/'.format(test_dir,datatype,num_test,length,noise_var) + #test_dir = '{}/{}{}/ntest{}_xlen{}_noise{}_snrlim10/'.format(test_dir,datatype,datapp,num_test,length,noise_var) + test_dir = '{}/{}{}/ntest{}_xlen{}_noise{}_snrlim2/'.format(test_dir,datatype,datapp,num_test,length,noise_var) - os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3" + os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7" # make double the default type for tensors torch.set_default_tensor_type(torch.DoubleTensor) #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - device = torch.device('cuda', 1) + device = torch.device('cuda', 6) print(device, torch.cuda.get_device_name(device)) @@ -376,7 +514,7 @@ if __name__ == "__main__": for n_conv_f in num_conv_filters: for mp in maxpool_size: # run the analysis - run_cvae_lin(device, + run_dnn_lin(device, par_dim=num_parameters, num_fc_layers=n_fc_l, num_fc_neurons=n_fc_n, @@ -388,6 +526,7 @@ if __name__ == "__main__": learning_rate=learning_rate, decay_start=decay_start, decay_rate=decay_rate, + low_cut = low_cut, train_epochs=train_epochs, length=length, batch=batch, @@ -400,7 +539,9 @@ if __name__ == "__main__": noise_var=noise_var, num_samples=10000, basedir=basedir, - test_dir = test_dir) + test_dir = test_dir, + datapp = datapp, + snrlim=snrlim) diff --git a/run_dnn_iteration.py b/run_dnn_iteration.py new file mode 100755 index 0000000000000000000000000000000000000000..36e6b13b3316f41c4d05b8dca9c36bbb380b6f14 --- /dev/null +++ b/run_dnn_iteration.py @@ -0,0 +1,458 @@ +#!/usr/bin/env python +import torch +import torch.nn as nn +from torch.utils.data import Dataset, DataLoader +from torchvision import datasets, transforms +import matplotlib.pyplot as plt +import numpy as np +import corner +import os +import sys +import pickle +from scipy.stats import binom +from dnn import DNN +import mpmath +import time +#from cvae_conv import CVAE as CVAE_CONV +import emcee +#from mcmc_test import pp_plot +#sys.path.append("../gendata/") +import gen_data +#import gen_cw +import copy + +def adjust_learning_rate(lr, optimizer, epoch, decay_rate = 0.99): + """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" + lr = lr * (decay_rate ** (epoch // 5)) + 
for param_group in optimizer.param_groups: + param_group['lr'] = lr + +def loss_func_lin(outs, labels): + + out1 = outs[outs[:,0] < outs[:,1]] + out2 = outs[outs[:,0] >= outs[:,1]] + + labels1 = labels[outs[:,0] < outs[:,1]][:,1] + labels2 = labels[outs[:,0] >= outs[:,1]][:,1] + + #print(out1.size(), out1[:,0].size(), out1[:,1].size(), labels1.size()) + #print(out2.size(), out2[:,0].size(), out2[:,1].size(), labels2.size()) + + l1 = labels1*(out1[:,0] - out1[:,1]) + out1[:,1] - out1[:,0] - torch.log(1 + torch.exp(out1[:,1] - out1[:,0])) + l2 = labels2*(out2[:,0] - out2[:,1]) - torch.log(1 + torch.exp(out2[:,0] - out2[:,1])) + + return -torch.mean(torch.cat((l1,l2))) + +def train_batch(epoch, model, optimizer, device, batch, labels, ramp=1.0, train = True): + model.train(train) + if train: + optimizer.zero_grad() + length = float(batch.size(0)) + # calculate r2, q and r1 means and variances + out_x = model(batch) + #loss_fn = nn.BCELoss() + #loss = loss_func_lin(out_x.reshape(np.shape(labels)), labels) + loss_fn = nn.NLLLoss() # use with logsoftma + loss = loss_fn(out_x.reshape(np.shape(labels)), labels[:,0].long()) + + if train: + loss.backward() + # update the weights + optimizer.step() + + return loss.item() + +def train(model, device, epochs, learning_rate, test_iterator, nest_odds, true_mod, num_samples, save_dir, transfer_batch, iteration_batch, decay_start=100, decay_rate = 0.99,snrlim = 10): + + optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate) + + train_losses = [] + val_losses = [] + + odds_last_est = [] + odds_last_nest = [] + true_mods_last = [] + + train_data = gen_data.GenSG(length=length, package = "keras",onehot=True) + validation_data = gen_data.GenSG(length=length, package = "keras",onehot=True) + + for epoch in range(epochs): + + if epoch > decay_start: + adjust_learning_rate(learning_rate, optimizer, epoch, decay_rate=decay_rate) + + train_data.gen_train_data(transfer_batch, noise_var = noise_var,snr_lim=snrlim) + validation_data.gen_train_data(transfer_batch, noise_var = noise_var,snr_lim=snrlim) + local_batch, local_labels = torch.Tensor(np.array(train_data.data[0]).astype(float)).to(device), torch.Tensor(np.array([ list(v[0]) for v in train_data.data[1] ]).astype(float)).to(device) + + for temp_ind in range(int(len(local_batch)/iteration_batch)): + train_loss = train_batch(epoch, model, optimizer, device, local_batch[temp_ind*iteration_batch:(temp_ind+1)*iteration_batch],local_labels[temp_ind*iteration_batch:(temp_ind+1)*iteration_batch], train=True) + train_losses.append(train_loss) + # validation + # Transfer to GPU + val_batch, val_labels = torch.Tensor(np.array(validation_data.data[0]).astype(float)).to(device), torch.Tensor(np.array([ list(v[0]) for v in validation_data.data[1] ]).astype(float)).to(device) + for temp_ind in range(int(len(val_batch)/iteration_batch)): + val_loss = train_batch(epoch, model, optimizer, device, val_batch[temp_ind*iteration_batch:(temp_ind+1)*iteration_batch],val_labels[temp_ind*iteration_batch:(temp_ind+1)*iteration_batch], train=False) + val_losses.append(val_loss) + + if epoch % int(epochs/20) == 0: + print("Epoch: {}, Training loss: {}, Validation loss: {}".format(epoch,train_loss, val_loss)) + + + loss_plot(save_dir, train_losses, val_losses) + est_odds,_, snrs = run(model,test_iterator,device=device) + + fig, ax = plt.subplots() + ax.plot(snrs, np.log10(copy.copy(est_odds)), marker = "o", ms = 5, ls = "none") + ax.set_xlabel("snr") + ax.set_ylabel("log10 odds") + 
fig.savefig('{}/snr_vs_odds_epoch{}.png'.format(save_dir,epoch)) + plt.close(fig) + + fig, ax = plt.subplots() + ax.plot(snrs, np.log10(copy.copy(nest_odds)), marker = "o", ms = 5, ls = "none") + ax.set_xlabel("snr") + ax.set_ylabel("log10 odds") + fig.savefig('{}/snr_vs_nest_odds_epoch{}.png'.format(save_dir,epoch)) + plt.close(fig) + + odds_fig_full = odds_plot_full(copy.copy(nest_odds),copy.copy(est_odds)) + odds_fig_full.savefig('{}/odds_full_epoch{}.png'.format(save_dir,epoch)) + odds_fig = odds_plot(copy.copy(nest_odds),copy.copy(est_odds),true_mod,num_samples) + odds_fig.savefig('{}/odds_epoch{}.png'.format(save_dir,epoch)) + + if epoch > epochs - 20: + odds_last_est.extend(est_odds) + odds_last_nest.extend(nest_odds) + true_mods_last.extend(true_mod) + + odds_fig = odds_plot(np.array(odds_last_nest), np.array(odds_last_est), np.array(true_mods_last), num_samples) + odds_fig.savefig('{}/odds_last_20epochs.png'.format(save_dir,epoch)) + return train_losses,val_losses + +def dnn_odds(outputs, labels): + outs = [] + done = 0 + for i,test in enumerate(outputs): + odds = np.double(mpmath.exp(test[0] - test[1])) + outs.append(odds) + return np.array(outs) + + +def run(model,test_it,device="cpu"): + # set the evaluation mode + model.eval() + + # test loss for the data + test_loss = 0 + samples = {} + # we don't need to track the gradients, since we are not updating the parameters during evaluation / testing + with torch.no_grad(): + for local_batch, (local_labels, local_snr) in test_it: + # Transfer to GPU + local_batch, local_labels = local_batch.to(device), local_labels.to(device) + outputs = model.test(local_batch) + outputs = outputs.reshape((np.shape(outputs)[0], np.shape(outputs)[-1])) + odds = dnn_odds(torch.Tensor(outputs).numpy(),local_labels) + snrs = local_snr.cpu().numpy() * local_labels[:,1].cpu().numpy() + truths = local_labels + break + return odds,truths.cpu().numpy(), snrs + + +def loss_plot(save_dir, loss, val_loss, space = 10): + fig, ax = plt.subplots(nrows = 2,figsize=(15,15)) + xs = np.linspace(0, len(loss), len(loss)) + mean_loss = np.zeros(len(xs)) + width = 1000 + for i in range(len(xs)): + lind = i - width + hind = i + width + if hind >= len(loss): + hind = len(loss) + if lind < 0: + lind = 0 + mean_loss[i] = np.mean(loss[lind:hind]) + + ax[0].plot(xs, loss, label="loss", color="C0", alpha=0.5) + ax[0].plot(xs, val_loss, label="val loss", color="C1", ls="--", lw=3, alpha=0.5) + ax[0].plot(xs, mean_loss, label="meanloss", color = "C3", ls = "-", lw = 3) + + ax[1].plot(xs, loss, label="loss", color="C0", alpha=0.5) + ax[1].plot(xs, val_loss, label="val loss", color="C1", ls="--", lw=3, alpha=0.5) + ax[1].plot(xs, mean_loss, label="meanloss", color = "C3", ls = "-", lw = 3) + + ax[1].set_xlabel("Epoch") + ax[0].legend(fontsize=20) + ax[1].legend(fontsize=20) + ax[0].grid() + ax[1].grid() + ax[1].set_yscale("log") + ax[1].set_xscale("log") + #ax.set_yscale("symlog") + fig.savefig(os.path.join(save_dir, "loss.png")) + plt.close(fig) + +def odds_plot(trueOdds,estOdds,trueMod,num_samples=1000): + + fig = plt.figure() + + trues = np.array([pr/(1-pr) for pr in np.linspace(0,1,num_samples)]) + ovar = np.array([np.sqrt((1-pr)/(num_samples*pr) + pr/(num_samples*(1-pr))) for pr in np.linspace(0,1,num_samples)]) + + # identify the indices of where any results are zero or infinite + k = np.argwhere((trueOdds==0.0) + (estOdds==0.0) + (trueOdds>num_samples) + (estOdds>num_samples)) + + # cap the true and est odds to the cvae limits + j = np.argwhere(trueOdds>num_samples) + 
trueOdds[j] = num_samples - 1.0 + j = np.argwhere(trueOdds==0.0) + trueOdds[j] = 1.0/num_samples + j = np.argwhere(estOdds==0.0) + estOdds[j] = 1.0/num_samples + j = np.argwhere(estOdds>num_samples) + estOdds[j] = num_samples - 1.0 + + # identify the indices of results that fall in the good zone + noise_idx = np.isfinite(trueOdds)*np.isfinite(estOdds)*(trueMod<1.0)*(trueOdds>0.0)*(estOdds>0.0) + signal_idx = np.isfinite(trueOdds)*np.isfinite(estOdds)*(trueMod>0.0)*(trueOdds>0.0)*(estOdds>0.0) + + plt.plot(np.log(trueOdds[noise_idx]),np.log(estOdds[noise_idx]),'r.',label='noise') + plt.plot(np.log(trueOdds[signal_idx]),np.log(estOdds[signal_idx]),'b.',label='signal') + plt.plot(np.log(trueOdds[k]),np.log(estOdds[k]),'ko',label='out of bounds') + plt.plot(np.log([1.0/num_samples, num_samples]),np.log([1.0/num_samples, num_samples]),'-k',alpha=0.5) + plt.plot(np.log(trues),np.log(trues) + ovar,'-r',alpha=0.5) + plt.plot(np.log(trues),np.log(trues) - ovar,'-r',alpha=0.5) + plt.xlim([np.log(1.0/num_samples)-1, np.log(num_samples)+1]) + plt.ylim([np.log(1.0/num_samples)-1, np.log(num_samples)+1]) + plt.xlabel('true Odds') + plt.ylabel('estimated Odds') + plt.legend() + return fig + +def odds_plot_full(true_odds,est_odds): + + fig,ax = plt.subplots(figsize = (20,10)) + + ax.plot(np.log(true_odds), np.log(est_odds), ".") + ax.plot([-10,60],[-10,60],"r") + ax.set_xlim([-10,60]) + ax.set_ylim([-10,60]) + ax.set_xlabel("Numerical odds", fontsize = 20) + ax.set_ylabel("NN odds", fontsize = 20) + return fig + +def test_model(save_dir, + model, + run_data, + nestOdds, + trueMod, + datatype = "line", + length=25, + num_test=100, + num_samples=1000, + noise_var=0.1 + ): + + run_iterator = DataLoader(run_data, batch_size=num_test) + samples,truths = run(model,run_iterator,device=device) + estOdds = samples#dnn_odds(samples) + + test_data = os.path.join(save_dir,"test_data") + test_data_plots = os.path.join(save_dir,"test_data_plots") + test_plots = os.path.join(save_dir,"test_plots") + for d in [test_data, test_data_plots,test_plots]: + if not os.path.isdir(d): + os.makedirs(d) + + with open(os.path.join(save_dir,"sample.pkl"),"wb") as f: + pickle.dump(samples,f) + + odds_fig = odds_plot(nestOdds,estOdds,trueMod,num_samples) + odds_fig.savefig('{}/odds.png'.format(save_dir)) + + for ind in range(num_test): + + fig, ax = plt.subplots() + ax.plot(run_data.data[ind][0],label = [val for val in run_data.data[ind][1]]) + ax.set_ylim([-1.2,1.2]) + fig.savefig(os.path.join(test_data_plots,"test_{}_s{}.png".format(ind, run_data.data[ind][1]))) + plt.close(fig) + with open(os.path.join(test_data,"test_{}.pkl".format(ind)),"wb") as f: + pickle.dump(run_data.data[ind],f) + + +def run_dnn_lin(device, + do_train=True, + do_test=True, + par_dim=2, + num_conv_layers=2, + num_fc_layers=2, + num_conv_filters=8, + conv_size=5, + pool_size=2, + stride=1, + dilation = 1, + num_fc_neurons=8, + learning_rate=1e-4, + decay_start = 100, + decay_rate = 0.99, + train_epochs=50, + num_samples=1000, + noise_var=0.1, + length=25, + iteration_batch=500, + transfer_batch = 5000, + num_test = 100, + datatype="line", + dropout=0.1, + basedir=None, + test_dir = None, + datapp = "", + snrlim=10): + + + print("Test Dir: {}".format(test_dir)) + with open(os.path.join(test_dir, "modelclass.pkl"),"rb") as f: + test_data = pickle.load(f) + + test_iterator = DataLoader(test_data, batch_size=num_test, shuffle=False) # single batch + + # precompute the true odds + with open(os.path.join(test_dir, "nest_odds.pkl"),"rb") as f: + nest_odds = 
pickle.load(f) + + true_mod = np.array([float(i[1][1]) for i in test_data.data]) + + fc_layers = [num_fc_neurons for i in range(num_fc_layers)] + conv_layers = [(num_conv_filters, conv_size, stride, dilation, pool_size) for i in range(num_conv_layers)] + + #if network=='cnn': + model = DNN(device=device, + input_dim=length, + par_dim = par_dim, + fc_layers = fc_layers, + conv_layers=conv_layers, + ).to(device) + + save_dir = '{}/{}{}_iteration/ntrain{}_xlen{}_noise{}_snrlim{}/epoch{}_lr{}_dec{}_{}_trbatch{}_itbatch{}_nconv{}_nconvn{}_convsize{}_pool{}_stride{}_dilation{}_drop{}_nfc{}_nfcn{}_r1_NLLloss'.format( + basedir,datatype, datapp, + num_train,length,noise_var,snrlim, + train_epochs,learning_rate,decay_start,decay_rate,transfer_batch,iteration_batch, + num_conv_layers,num_conv_filters,conv_size,pool_size,stride,dilation,dropout, + num_fc_layers,num_fc_neurons) + + if not os.path.isdir(save_dir): + os.makedirs(save_dir) + + # train the network + if do_train: + losses, val_loss= train(model, device, train_epochs, learning_rate, test_iterator, nest_odds, true_mod, num_samples, save_dir, transfer_batch = transfer_batch, iteration_batch = iteration_batch, decay_rate = decay_rate, decay_start=decay_start,snrlim=snrlim) + loss_plot(save_dir, losses, val_loss ) # make the loss plot + torch.save(model, os.path.join(save_dir,"model.pt")) # save the model + else: + try: + model = torch.load(os.path.join(save_dir,"model.pt")).to(device) + except: + print('ERROR: unable to load model {}. Exiting.'.format(os.path.join(save_dir,"model.pt"))) + exit(0) + + # test the model + test_model(save_dir, model, test_data, nest_odds, true_mod, length=length, num_test=num_test, num_samples=num_samples, datatype=datatype) + print('{}: saved results to {}'.format(time.asctime(),save_dir)) + + +if __name__ == "__main__": + + + # data parameters + num_train = int(1e6) + num_test = 300 + transfer_batch = 10000 + iteration_batch = 512 + length = 256 + datatype = "SG" + noise_var = 0.1 + snrlim = (0,1000) + + # network parameters + train_epochs = 20000 + learning_rate = 3e-4 + decay_start = 1000 + decay_rate = 0.998#5e-2 + + # network layers + num_parameters = 2 + + # dnn network + num_hidden = [5]#[0,1,2] + num_neurons = [128]#[8,16,32] + dropout = [0.0] + + conv_size = [32]#[3,5] + num_conv_filters = [64] + num_conv = [3] + maxpool_size = [2] + stride = 1 + dilation = 1 + + # train or test or both + do_train = True + do_test = True + #datapp = "_log1.5_6A" + datapp = ""#"_uniformA" + basedir = '/home/joseph.bayley/public_html/cvae_odds/dnn/' + test_dir = "/home/joseph.bayley/data/cvae_odds/test/" + test_dir = '{}/{}{}/ntest{}_xlen{}_noise{}/'.format(test_dir,datatype,datapp,num_test,length,noise_var) + + os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7" + + # make double the default type for tensors + torch.set_default_tensor_type(torch.DoubleTensor) + + #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + device = torch.device('cuda', 7) + print(device, torch.cuda.get_device_name(device)) + + + + for n_fc_n in num_neurons: + for n_fc_l in num_hidden: + for drop in dropout: + for c_size in conv_size: + for n_conv_l in num_conv: + for n_conv_f in num_conv_filters: + for mp in maxpool_size: + # run the analysis + run_dnn_lin(device, + par_dim=num_parameters, + num_fc_layers=n_fc_l, + num_fc_neurons=n_fc_n, + num_conv_layers=n_conv_l, + num_conv_filters=n_conv_f, + conv_size=c_size, + pool_size=mp, + stride=stride, + dilation = dilation, + learning_rate=learning_rate, + 
decay_start=decay_start, + decay_rate=decay_rate, + train_epochs=train_epochs, + length=length, + transfer_batch=transfer_batch, + iteration_batch=iteration_batch, + num_test=num_test, + datatype=datatype, + do_train=do_train, + do_test=do_test, + dropout=drop, + noise_var=noise_var, + num_samples=10000, + basedir=basedir, + test_dir = test_dir, + datapp = datapp, + snrlim=snrlim) + + + + diff --git a/run_dnn_iteration_cbc.py b/run_dnn_iteration_cbc.py new file mode 100755 index 0000000000000000000000000000000000000000..da07c783658a78120cc0496d062a3aab354fa547 --- /dev/null +++ b/run_dnn_iteration_cbc.py @@ -0,0 +1,588 @@ +#!/usr/bin/env python +import torch +import torch.nn as nn +from torch.utils.data import Dataset, DataLoader +from torchvision import datasets, transforms +import matplotlib.pyplot as plt +import numpy as np +import corner +import os +import sys +import pickle +from scipy.stats import binom +from dnn import DNN +import mpmath +import time +#from cvae_conv import CVAE as CVAE_CONV +import emcee +#from mcmc_test import pp_plot +#sys.path.append("../gendata/") +import gen_data +#import gen_cw +import copy +import load_data_cbc +import json +import h5py +import natsort + +def adjust_learning_rate(lr, optimizer, epoch, decay_rate = 0.99): + """Sets the learning rate to the initial LR decayed by 10 every 30 epochs""" + lr = lr * (decay_rate ** (epoch // 5)) + for param_group in optimizer.param_groups: + param_group['lr'] = lr + +def loss_func_lin(outs, labels): + + out1 = outs[outs[:,0] < outs[:,1]] + out2 = outs[outs[:,0] >= outs[:,1]] + + labels1 = labels[outs[:,0] < outs[:,1]][:,1] + labels2 = labels[outs[:,0] >= outs[:,1]][:,1] + + #print(out1.size(), out1[:,0].size(), out1[:,1].size(), labels1.size()) + #print(out2.size(), out2[:,0].size(), out2[:,1].size(), labels2.size()) + + l1 = labels1*(out1[:,0] - out1[:,1]) + out1[:,1] - out1[:,0] - torch.log(1 + torch.exp(out1[:,1] - out1[:,0])) + l2 = labels2*(out2[:,0] - out2[:,1]) - torch.log(1 + torch.exp(out2[:,0] - out2[:,1])) + + return -torch.mean(torch.cat((l1,l2))) + +def train_batch(epoch, model, optimizer, device, batch, labels, ramp=1.0, train = True): + model.train(train) + if train: + optimizer.zero_grad() + #length = float(batch.size(0)) + # calculate r2, q and r1 means and variances + out_x = model(batch) + #loss_fn = nn.BCELoss() + #loss = loss_func_lin(out_x.reshape(np.shape(labels)), labels) + loss_fn = nn.NLLLoss() # use with logsoftma + + loss = loss_fn(out_x, labels.long()) + + if train: + loss.backward() + # update the weights + optimizer.step() + + return loss.item() + +def train(model, device, epochs, learning_rate, train_data, validation_data, test_data, nest_odds, true_mod, num_samples, save_dir, transfer_batch, iteration_batch, decay_start=100, decay_rate = 0.99,snrlim = 10): + + optimizer = torch.optim.Adam(model.parameters(),lr=learning_rate) + + train_losses = [] + val_losses = [] + + odds_last_est = [] + odds_last_nest = [] + true_mods_last = [] + plot_test = True + plot_train = True + plot_val = True + print("loading intial data") + train_data.load_next_chunk() + + for epoch in range(epochs): + + if epoch > decay_start: + adjust_learning_rate(learning_rate, optimizer, epoch, decay_rate=decay_rate) + + temp_train_losses = [] + for temp_ind in range(int(transfer_batch/iteration_batch)): + train_batch_temp, train_label = train_data[temp_ind] + if temp_ind == 0 and plot_train: + print(len(train_label == 0)) + randinds = np.random.randint(0, len(train_batch_temp), 10) + for i, tb in 
enumerate(randinds): + fig, ax = plt.subplots() + ax.plot(train_batch_temp[tb]) + ax.set_title("Label: {}".format(train_label[tb])) + fig.savefig('{}/train_example_{}_{}.png'.format(save_dir,i, train_label[tb])) + plt.close(fig) + plot_train = False + train_loss = train_batch(epoch, model, optimizer, device, train_batch_temp.to(device),train_label.to(device), train=True) + temp_train_losses.append(train_loss) + train_losses.append(np.mean(temp_train_losses)) + # validation + # Transfer to GPU + val_batch_temp, val_label_temp = validation_data[0] + if plot_val: + randinds = np.random.randint(0, len(val_batch_temp), 10) + for i, vb in enumerate(randinds): + fig, ax = plt.subplots() + ax.plot(val_batch_temp[vb]) + ax.set_title("Label: {}".format(val_label_temp[vb])) + fig.savefig('{}/val_example_{}_{}.png'.format(save_dir,i, val_label_temp[vb])) + plt.close(fig) + plot_val = False + val_loss = train_batch(epoch, model, optimizer, device, val_batch_temp.to(device),val_label_temp.to(device), train=False) + val_losses.append(np.mean(val_loss)) + + if epoch % int(epochs/20) == 0: + print("Epoch: {}, Training loss: {}, Validation loss: {}".format(epoch,train_loss, val_loss)) + + loss_plot(save_dir, train_losses, val_losses) + est_odds,est_log_odds, temp_truths, temp_prob, temp_logprob, outputs = run(model,test_data,device=device) + val_est_odds,val_est_log_odds, val_temp_truths, val_temp_prob, val_temp_logprob, val_outputs = run(model,validation_data,device=device) + if plot_test: + randinds = np.random.randint(0, len(test_data.Y_noisy), 10) + for i,test_ind in enumerate(randinds): + fig, ax = plt.subplots() + ax.plot(test_data.Y_noisy[test_ind]) + fig.savefig('{}/test_example_{}_{}.png'.format(save_dir,i, test_data.X[test_ind])) + plt.close(fig) + plot_test = False + print(nest_odds, est_odds) + print("odds_shape", np.shape(nest_odds), np.shape(est_odds)) + + fig, ax = plt.subplots() + zero_samples = val_temp_prob[val_temp_truths == 1] + sig_samples = val_temp_prob[val_temp_truths == 0] + bins = np.linspace(0,1,30) + ax.hist(zero_samples,color="C0", bins = bins) + ax.hist(sig_samples,color="C1", bins = bins) + fig.savefig(os.path.join(save_dir, "test_hist_{}.png".format(epoch))) + plt.close(fig) + + fig, ax = plt.subplots() + log_zero_samples = val_temp_logprob[val_temp_truths == 1] + log_sig_samples = val_temp_logprob[val_temp_truths == 0] + ax.hist(log_zero_samples,color="C0", bins = 30) + ax.hist(log_sig_samples,color="C1", bins = 30) + fig.savefig(os.path.join(save_dir, "log_test_hist_{}.png".format(epoch))) + plt.close(fig) + + first_snr = copy.copy(test_data.snrs[0]) + fig, ax = plt.subplots() + ax.plot(copy.copy(test_data.snrs), np.log(copy.copy(nest_odds)),color="C0", marker="o", ls = "none",ms = 5) + ax.set_xlabel("SNR") + ax.set_ylabel("log nest odds") + fig.savefig(os.path.join(save_dir, "log_nestodd_snr.png")) + plt.close(fig) + + first_snr = copy.copy(test_data.snrs[0]) + fig, ax = plt.subplots() + ax.plot(copy.copy(test_data.snrs), copy.copy(nest_odds),color="C0", marker="o", ls = "none",ms = 5) + ax.set_xlabel("SNR") + ax.set_ylabel("nest odds") + fig.savefig(os.path.join(save_dir, "nestodd_snr.png")) + plt.close(fig) + + fig, ax = plt.subplots() + ax.plot(copy.copy(test_data.snrs), np.log10(copy.copy(est_odds)), color="C0", marker="o", ls = "none",ms = 5) + ax.set_xlabel("SNR") + ax.set_ylabel("log10 CNN est odds") + fig.savefig(os.path.join(save_dir, "estodd_snr_{}.png".format(epoch))) + plt.close(fig) + + fig, ax = plt.subplots() + ax.plot(copy.copy(test_data.snrs), 
copy.copy(est_log_odds), color="C0", marker="o", ls = "none",ms = 5) + ax.set_xlabel("SNR") + ax.set_ylabel("log CNN est odds") + fig.savefig(os.path.join(save_dir, "estodd_snr_{}.png".format(epoch))) + plt.close(fig) + + print("test_snrs0", first_snr - test_data.snrs[0]) + + fig, ax = plt.subplots() + ax.plot(validation_data.snrs, val_est_log_odds,color="C0", marker="o", ls = "none",ms = 5) + ax.set_xlabel("SNR") + ax.set_ylabel("log10 CNN est odds") + fig.savefig(os.path.join(save_dir, "val_estodd_snr_{}.png".format(epoch))) + plt.close(fig) + + odds_fig_full = odds_plot_full(copy.copy(nest_odds),copy.copy(est_odds)) + odds_fig_full.savefig('{}/odds_full_epoch{}.png'.format(save_dir,epoch)) + odds_fig = odds_plot(np.exp(copy.copy(nest_odds)),copy.copy(est_odds),true_mod,num_samples) + odds_fig.savefig('{}/odds_epoch{}.png'.format(save_dir,epoch)) + + if epoch > epochs - 20: + odds_last_est.extend(est_odds) + odds_last_nest.extend(nest_odds) + true_mods_last.extend(true_mod) + + if epoch % 20 == 0: + train_data.load_next_chunk() + + odds_fig = odds_plot(np.exp(np.array(odds_last_nest)), np.array(odds_last_est), np.array(true_mods_last), num_samples) + odds_fig.savefig('{}/odds_last_20epochs.png'.format(save_dir,epoch)) + return train_losses,val_losses + +def dnn_odds(outputs): + outs = [] + log_outs = [] + probs = [] + logprobs = [] + done = 0 + print(np.shape(outputs)) + for i,test in enumerate(outputs): + logodds = test[1] - test[0] + odds = np.double(mpmath.exp(logodds)) + # test[1] is the probability of a signal, test[0] is probability of noise + prob = np.double(mpmath.exp(test[1])) + logprobs.append(np.double(test[1])) + outs.append(odds) + log_outs.append(logodds) + probs.append(prob) + return np.array(outs), np.array(log_outs), np.array(probs), np.array(logprobs) + + +def run(model,test_it,device="cpu"): + # set the evaluation mode + model.eval() + + # test loss for the data + test_loss = 0 + samples = {} + # we don't need to track the gradients, since we are not updating the parameters during evaluation / testing + with torch.no_grad(): + # Transfer to GPU + local_batch, local_labels = test_it[0] + print("in shape",np.shape(local_batch)) + outputs = model.test(torch.Tensor(local_batch).to(device)) + print("out shape", np.shape(outputs)) + odds, log_odds, probs, log_probs = dnn_odds(outputs) + truths = local_labels + return odds, log_odds, truths, probs, log_probs, outputs + + +def loss_plot(save_dir, loss, val_loss, space = 10): + fig, ax = plt.subplots(nrows = 2,figsize=(15,15)) + xs = np.linspace(0, len(loss), len(loss)) + mean_loss = np.zeros(len(xs)) + width = 1000 + for i in range(len(xs)): + lind = i - width + hind = i + width + if hind >= len(loss): + hind = len(loss) + if lind < 0: + lind = 0 + mean_loss[i] = np.mean(loss[lind:hind]) + + ax[0].plot(xs, loss, label="loss", color="C0", alpha=0.5) + ax[0].plot(xs, val_loss, label="val loss", color="C1", ls="--", lw=3, alpha=0.5) + ax[0].plot(xs, mean_loss, label="meanloss", color = "C3", ls = "-", lw = 3) + + ax[1].plot(xs, loss, label="loss", color="C0", alpha=0.5) + ax[1].plot(xs, val_loss, label="val loss", color="C1", ls="--", lw=3, alpha=0.5) + ax[1].plot(xs, mean_loss, label="meanloss", color = "C3", ls = "-", lw = 3) + + ax[1].set_xlabel("Epoch") + ax[0].legend(fontsize=20) + ax[1].legend(fontsize=20) + ax[0].grid() + ax[1].grid() + ax[1].set_yscale("log") + ax[1].set_xscale("log") + #ax.set_yscale("symlog") + fig.savefig(os.path.join(save_dir, "loss.png")) + plt.close(fig) + +def 
odds_plot(trueOdds,estOdds,trueMod,num_samples=1000): + + fig = plt.figure() + + trues = np.array([pr/(1-pr) for pr in np.linspace(0,1,num_samples)]) + ovar = np.array([np.sqrt((1-pr)/(num_samples*pr) + pr/(num_samples*(1-pr))) for pr in np.linspace(0,1,num_samples)]) + + # identify the indices of where any results are zero or infinite + k = np.argwhere((trueOdds==0.0) + (estOdds==0.0) + (trueOdds>num_samples) + (estOdds>num_samples)) + + # cap the true and est odds to the cvae limits + j = np.argwhere(trueOdds>num_samples) + trueOdds[j] = num_samples - 1.0 + j = np.argwhere(trueOdds==0.0) + trueOdds[j] = 1.0/num_samples + j = np.argwhere(estOdds==0.0) + estOdds[j] = 1.0/num_samples + j = np.argwhere(estOdds>num_samples) + estOdds[j] = num_samples - 1.0 + + # identify the indices of results that fall in the good zone + noise_idx = np.isfinite(trueOdds)*np.isfinite(estOdds)*(trueMod<1.0)*(trueOdds>0.0)*(estOdds>0.0) + signal_idx = np.isfinite(trueOdds)*np.isfinite(estOdds)*(trueMod>0.0)*(trueOdds>0.0)*(estOdds>0.0) + + plt.plot(np.log(trueOdds[noise_idx]),np.log(estOdds[noise_idx]),'r.',label='noise') + plt.plot(np.log(trueOdds[signal_idx]),np.log(estOdds[signal_idx]),'b.',label='signal') + plt.plot(np.log(trueOdds[k]),np.log(estOdds[k]),'ko',label='out of bounds') + plt.plot(np.log([1.0/num_samples, num_samples]),np.log([1.0/num_samples, num_samples]),'-k',alpha=0.5) + plt.plot(np.log(trues),np.log(trues) + ovar,'-r',alpha=0.5) + plt.plot(np.log(trues),np.log(trues) - ovar,'-r',alpha=0.5) + plt.xlim([np.log(1.0/num_samples)-1, np.log(num_samples)+1]) + plt.ylim([np.log(1.0/num_samples)-1, np.log(num_samples)+1]) + plt.xlabel('true Odds') + plt.ylabel('estimated Odds') + plt.legend() + return fig + +def odds_plot_full(true_odds,est_odds): + + fig,ax = plt.subplots(figsize = (20,10)) + + ax.plot(np.log10(true_odds), np.log10(est_odds), ".") + ax.plot([-10,60],[-10,60],"r") + ax.set_xlim([-10,60]) + ax.set_ylim([-10,60]) + ax.set_xlabel("log10 Numerical odds", fontsize = 20) + ax.set_ylabel("log10 NN odds", fontsize = 20) + return fig + +def test_model(save_dir, + model, + run_data, + nestOdds, + trueMod, + datatype = "line", + length=25, + num_test=100, + num_samples=1000, + noise_var=0.1 + ): + + samples, log_odds, truths, prob, logprob, output = run(model,run_data,device=device) + estOdds = samples#dnn_odds(samples) + + test_data = os.path.join(save_dir,"test_data") + test_data_plots = os.path.join(save_dir,"test_data_plots") + test_plots = os.path.join(save_dir,"test_plots") + for d in [test_data, test_data_plots,test_plots]: + if not os.path.isdir(d): + os.makedirs(d) + + zero_samples = prob[truths == 1,0] + sig_samples = prob[truths == 0,0] + fig, ax = plt.subplots() + bins = np.linspace(0,1,30) + ax.hist(zero_samples,color="C0", bins = bins) + ax.hist(sig_samples,color="C1", bins = bins) + fig.savefig(os.path.join(save_dir, "test_hist.png")) + + with open(os.path.join(save_dir,"sample.pkl"),"wb") as f: + pickle.dump(samples,f) + + odds_fig = odds_plot(nestOdds,estOdds,trueMod,num_samples) + odds_fig.savefig('{}/odds.png'.format(save_dir)) + + for ind in range(num_test): + + fig, ax = plt.subplots() + ax.plot(run_data.Y_noisefree[ind][0],label = [val for val in run_data.X[ind]]) + ax.plot(run_data.Y_noisefree[ind][1],label = [val for val in run_data.X[ind]]) + ax.set_ylim([-1.2,1.2]) + fig.savefig(os.path.join(test_data_plots,"test_{}_s{}.png".format(ind, run_data.X[ind]))) + plt.close(fig) + with open(os.path.join(test_data,"test_{}.pkl".format(ind)),"wb") as f: + 
pickle.dump([run_data.X[ind], run_data.Y_noisefree[ind]],f) + + +def run_dnn_lin(device, + do_train=True, + do_test=True, + par_dim=2, + num_conv_layers=2, + num_fc_layers=2, + num_conv_filters=8, + conv_size=5, + pool_size=2, + stride=1, + dilation = 1, + num_fc_neurons=8, + learning_rate=1e-4, + decay_start = 100, + decay_rate = 0.99, + train_epochs=50, + num_samples=1000, + noise_var=0.1, + length=25, + iteration_batch=500, + transfer_batch = 5000, + num_test = 100, + datatype="line", + dropout=0.1, + basedir=None, + test_dir = None, + datapp = "", + snrlim=10): + + with open("./params_files_256_narrowspin_8pars/params.json","r") as f: + params = json.load(f) + + with open("./params_files_256_narrowspin_8pars/bounds.json","r") as f: + bounds = json.load(f) + + with open("./params_files_256_narrowspin_8pars/fixed_vals.json","r") as f: + fixed_vals = json.load(f) + + print("y_normscale", params["y_normscale"]) + params, bounds, masks, fixed_vals = load_data_cbc.get_params(params, bounds, fixed_vals, params_dir = "./params_files_256_narrowspin") + + + num_channels = len(params["det"]) + + #print("Test Dir: {}".format(test_dir)) + #with open(os.path.join(test_dir, "modelclass.pkl"),"rb") as f: + # test_data = pickle.load(f) + + #test_iterator = DataLoader(test_data, batch_size=num_test, shuffle=False) # single batch + + # precompute the true odds + #with open(os.path.join(test_dir, "nest_odds.pkl"),"rb") as f: + # nest_odds = pickle.load(f) + + #true_mod = np.array([float(i[1][1]) for i in test_data.data]) + + fc_layers = [num_fc_neurons for i in range(num_fc_layers)] + conv_layers = [(num_conv_filters, conv_size, stride, dilation, pool_size) for i in range(num_conv_layers)] + + train_data =load_data_cbc.LoadCBC(params["train_set_dir"], params = params, bounds = bounds, masks = masks, fixed_vals = fixed_vals, chunk_batch = 40, batch_size = iteration_batch) + validation_data =load_data_cbc.LoadCBC(params["val_set_dir"], params = params, bounds = bounds, masks = masks, fixed_vals = fixed_vals, chunk_batch = 2, val_set = True) + + test_size = 40 + test_data = load_data_cbc.LoadCBC(params["test_set_dir"], params = params, bounds = bounds, masks = masks,fixed_vals = fixed_vals, chunk_batch = test_size, test_set = True) + test_data.load_next_chunk() + validation_data.load_next_chunk() + + + + fpath = params["test_set_dir"].replace("test_waveforms", "test_dynesty1") + log_nest_odds = [] + for filename in natsort.natsorted(os.listdir(fpath)): + if filename.endswith(".h5py"): + h5py_file = h5py.File(os.path.join(fpath,filename), 'r') + log_nest_odds.append(np.array(h5py_file['log_bayes_factor'])) + + nest_odds = np.exp(log_nest_odds) + true_mod = np.ones(len(nest_odds)) + + #if network=='cnn': + model = DNN(device=device, + input_dim=length, + par_dim = par_dim, + fc_layers = fc_layers, + conv_layers=conv_layers, + num_channels = num_channels, + ).to(device) + + save_dir = '{}/CBC/ntrain{}_xlen{}_8pars/epoch{}_lr{}_dec{}_{}_trbatch{}_itbatch{}_nconv{}_nconvn{}_convsize{}_pool{}_stride{}_dilation{}_drop{}_nfc{}_nfcn{}_r1_NLLloss_leakyreluact'.format( + basedir, + num_train,length, + train_epochs,learning_rate,decay_start,decay_rate,transfer_batch,iteration_batch, + num_conv_layers,num_conv_filters,conv_size,pool_size,stride,dilation,dropout, + num_fc_layers,num_fc_neurons) + + if not os.path.isdir(save_dir): + os.makedirs(save_dir) + + # train the network + if do_train: + losses, val_loss= train(model, device, train_epochs, learning_rate, train_data, validation_data, test_data, nest_odds, true_mod, 
num_samples, save_dir, transfer_batch = transfer_batch, iteration_batch = iteration_batch, decay_rate = decay_rate, decay_start=decay_start,snrlim=snrlim) + loss_plot(save_dir, losses, val_loss ) # make the loss plot + torch.save(model, os.path.join(save_dir,"model.pt")) # save the model + else: + try: + model = torch.load(os.path.join(save_dir,"model.pt")).to(device) + except: + print('ERROR: unable to load model {}. Exiting.'.format(os.path.join(save_dir,"model.pt"))) + exit(0) + + # test the model + test_model(save_dir, model, test_data, nest_odds, true_mod, length=length, num_test=num_test, num_samples=num_samples, datatype=datatype) + print('{}: saved results to {}'.format(time.asctime(),save_dir)) + + +if __name__ == "__main__": + + + # data parameters + num_train = int(1e6) + num_test = 300 + transfer_batch = 50000 + iteration_batch = 1024 + length = 256 + datatype = "CBC" + noise_var = 0.1 + snrlim = (0,1000) + + # network parameters + train_epochs = 5000 + learning_rate = 1e-5 + decay_start = 1000 + decay_rate = 0.998#5e-2 + + # network layers + num_parameters = 2 + + # dnn network + num_hidden = [4]#[0,1,2] + num_neurons = [128]#[8,16,32] + dropout = [0.0] + + conv_size = [16]#[3,5] + num_conv_filters = [32] + num_conv = [2] + maxpool_size = [8] + stride = 1 + dilation = 1 + + # train or test or both + do_train = True + do_test = True + #datapp = "_log1.5_6A" + datapp = ""#"_uniformA" + basedir = '/home/joseph.bayley/public_html/cvae_odds/dnn/' + test_dir = "/home/joseph.bayley/data/cvae_odds/test/" + test_dir = '{}/{}{}/ntest{}_xlen{}_noise{}/'.format(test_dir,datatype,datapp,num_test,length,noise_var) + + os.environ["CUDA_VISIBLE_DEVICES"] = "0,1,2,3,4,5,6,7" + + # make double the default type for tensors + torch.set_default_tensor_type(torch.DoubleTensor) + + #device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') + device = torch.device('cuda', 1) + print(device, torch.cuda.get_device_name(device)) + + + + for n_fc_n in num_neurons: + for n_fc_l in num_hidden: + for drop in dropout: + for c_size in conv_size: + for n_conv_l in num_conv: + for n_conv_f in num_conv_filters: + for mp in maxpool_size: + # run the analysis + run_dnn_lin(device, + par_dim=num_parameters, + num_fc_layers=n_fc_l, + num_fc_neurons=n_fc_n, + num_conv_layers=n_conv_l, + num_conv_filters=n_conv_f, + conv_size=c_size, + pool_size=mp, + stride=stride, + dilation = dilation, + learning_rate=learning_rate, + decay_start=decay_start, + decay_rate=decay_rate, + train_epochs=train_epochs, + length=length, + transfer_batch=transfer_batch, + iteration_batch=iteration_batch, + num_test=num_test, + datatype=datatype, + do_train=do_train, + do_test=do_test, + dropout=drop, + noise_var=noise_var, + num_samples=10000, + basedir=basedir, + test_dir = test_dir, + datapp = datapp, + snrlim=snrlim) + + + +
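
The boolean command-line flags in cbc_scripts.py (--gen_dag, --gen_train, --gen_val, --gen_test) are declared with default=False but no type, so any supplied string, including "False", is truthy. A minimal sketch of a converter that could be passed as type=; the helper name str2bool is illustrative and not defined in the repo.

import argparse

def str2bool(value):
    """Parse textual booleans so e.g. `--gen_train False` is not silently truthy."""
    if isinstance(value, bool):
        return value
    if value.lower() in ("true", "t", "1", "yes"):
        return True
    if value.lower() in ("false", "f", "0", "no"):
        return False
    raise argparse.ArgumentTypeError("expected a boolean, got {!r}".format(value))

# usage (hypothetical): parser.add_argument("--gen_train", type=str2bool, default=False)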
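
run_cvae.train() saves the model whenever the new validation loss beats the previous minimum; since the comparison is made against a list that already contains the current loss, the check only fires if the running best is tracked separately. A minimal sketch of that pattern, assuming a save_dir and an epoch loop like the one in train():

import os
import torch

def save_if_best(model, val_loss, best_val, save_dir):
    """Save the whole model when the current validation loss improves on the
    best value seen so far; returns the updated running best."""
    if val_loss < best_val:
        torch.save(model, os.path.join(save_dir, "model.pt"))
        best_val = val_loss
    return best_val

# inside the epoch loop (names as in run_cvae.train):
# best_val = float("inf")
# ...
# best_val = save_if_best(model, val_loss, best_val, save_dir)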
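
run_cvae.run() builds the one-hot class labels for model.test_latent with an explicit numpy loop; the same conversion can be written with torch.nn.functional.one_hot. A vectorised sketch, not the repo's code:

import torch
import torch.nn.functional as F

def to_onehot(labels, num_classes=2):
    """Integer (or float-valued integer) class labels -> one-hot rows."""
    return F.one_hot(labels.long().flatten(), num_classes=num_classes).to(labels.dtype)

# to_onehot(torch.tensor([0., 1., 1.])) -> [[1, 0], [0, 1], [0, 1]]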
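
Several of the training functions pair a log-softmax output layer with nn.NLLLoss (the "use with logsoftmax" comments). For reference, that combination gives the same value as nn.CrossEntropyLoss applied to the raw outputs, which this standalone check demonstrates:

import torch
import torch.nn as nn
import torch.nn.functional as F

logits = torch.randn(4, 2)            # raw two-class network outputs
targets = torch.tensor([0, 1, 1, 0])  # integer class labels
nll = nn.NLLLoss()(F.log_softmax(logits, dim=1), targets)
ce = nn.CrossEntropyLoss()(logits, targets)
assert torch.allclose(nll, ce)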
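
adjust_learning_rate recomputes a staircase-decayed rate from the initial value each epoch, and run_dnn.py adds a low_cut floor. A compact variant that applies the floor directly to the decayed value (a sketch; the version in the patch tests the incoming rate instead):

def staircase_lr(initial_lr, epoch, decay_rate=0.99, step=5, low_cut=1e-10):
    """initial_lr * decay_rate**(epoch // step), never allowed below low_cut."""
    return max(initial_lr * decay_rate ** (epoch // step), low_cut)

# applying it to an optimizer, as adjust_learning_rate does:
# for group in optimizer.param_groups:
#     group["lr"] = staircase_lr(1e-4, epoch)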
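
LogExpm1 evaluates x + log(expm1(-x)), which equals log(1 - exp(x)) for x < 0, element by element through mpmath, with a hand-written backward pass. A pure-torch sketch of the same identity and gradient, kept on-device and vectorised; illustrative only, not a drop-in replacement for the extended-precision version:

import torch

class Log1mExp(torch.autograd.Function):
    """log(1 - exp(x)) for x < 0 with an explicit backward."""

    @staticmethod
    def forward(ctx, x):
        ctx.save_for_backward(x)
        # x + log(expm1(-x)) == log(1 - exp(x)); expm1 keeps precision as x -> 0-
        return x + torch.log(torch.expm1(-x))

    @staticmethod
    def backward(ctx, grad_output):
        (x,) = ctx.saved_tensors
        # d/dx log(1 - exp(x)) = -exp(x) / (1 - exp(x)) = -1 / expm1(-x)
        return grad_output * (-1.0 / torch.expm1(-x))

# x = torch.tensor([-0.1, -5.0], requires_grad=True)
# Log1mExp.apply(x).sum().backward()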
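
The branch split in loss_func_lin keeps the exponent non-positive in either case; algebraically both branches reduce to (1 - labels[:,1]) * d - log(1 + exp(d)) with d = outs[:,1] - outs[:,0], i.e. binary cross-entropy with logits evaluated on the logit difference. A collapsed reference form for checking, written as a sketch with the column and label conventions read off the code above:

import torch
import torch.nn.functional as F

def loss_func_lin_ref(outs, labels):
    """BCE-with-logits on d = outs[:,1] - outs[:,0] against target 1 - labels[:,1]."""
    d = outs[:, 1] - outs[:, 0]
    return F.binary_cross_entropy_with_logits(d, 1.0 - labels[:, 1])

# numerical check against the branch-split version defined in the patch:
# outs = torch.randn(8, 2)
# labels = F.one_hot(torch.randint(0, 2, (8,)), 2).float()
# assert torch.allclose(loss_func_lin(outs, labels), loss_func_lin_ref(outs, labels))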
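
loss_func_logsigmoid assigns its fallback to weightss and then multiplies by weights, so the weights=None path fails at runtime. A sketch of the weighted Bernoulli log-likelihood the commented-out call appears to target, with the fallback actually feeding weights and log1p used for the noise term:

import torch

def weighted_logsigmoid_loss(log_p, labels, weights=None):
    """Loss for a network emitting log-sigmoid signal probabilities log p."""
    if weights is None:
        weights = torch.ones_like(log_p)
    noise_term = torch.mean(torch.log1p(-torch.exp(log_p)) * (1 - labels))
    signal_term = torch.mean(log_p * labels * weights)
    return -(signal_term + noise_term)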
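
dnn_odds converts the two log-softmax outputs into an odds ratio by exponentiating their difference with mpmath. A standalone sketch of that conversion which also keeps the log-odds, since a double cannot represent odds above roughly exp(700); the column order [noise, signal] is assumed here, matching the CBC script (run_dnn.py uses the opposite sign):

import numpy as np
import mpmath

def logsoftmax_to_odds(outputs):
    """Rows of 2-class log-softmax outputs -> (log-odds, odds).

    The log-odds are the safer quantity to store; the odds column is reported
    as inf once it would overflow a double anyway."""
    log_odds = np.array([float(row[1] - row[0]) for row in outputs])
    odds = np.array([float(mpmath.exp(lo)) if lo < 700.0 else np.inf for lo in log_odds])
    return log_odds, odds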
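
odds_plot caps both the numerical and estimated odds to the dynamic range resolvable with num_samples posterior draws before taking logs, and the run_dnn.py version copies the inputs first so the caller's arrays stay untouched. The capping step as a standalone helper, for reference (a sketch):

import numpy as np

def cap_odds(odds, num_samples):
    """Return a clipped copy of `odds` so log-space plotting stays finite."""
    capped = np.array(odds, dtype=float, copy=True)
    capped[capped <= 0.0] = 1.0 / num_samples        # zero odds -> lower limit
    capped[capped > num_samples] = num_samples - 1.0  # huge/inf odds -> upper limit
    return capped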
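
run_dnn_iteration_cbc.run_dnn_lin builds the reference odds by reading log_bayes_factor out of each dynesty result file, with natsort keeping the file order numerical rather than lexical. The same loop as a small self-contained helper (a sketch; the directory layout and dataset key are as in the script above):

import os
import h5py
import natsort
import numpy as np

def load_log_bayes_factors(results_dir):
    """Collect dynesty log Bayes factors in natural file order."""
    log_b = []
    for fname in natsort.natsorted(os.listdir(results_dir)):
        if fname.endswith(".h5py"):
            with h5py.File(os.path.join(results_dir, fname), "r") as f:
                log_b.append(float(np.array(f["log_bayes_factor"])))
    return np.array(log_b)

# nest_odds = np.exp(load_log_bayes_factors(fpath))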