diff --git a/python/swiftest/swiftest/init_cond.py b/python/swiftest/swiftest/init_cond.py
index 9ada311d9..91a2f8569 100644
--- a/python/swiftest/swiftest/init_cond.py
+++ b/python/swiftest/swiftest/init_cond.py
@@ -303,4 +303,5 @@ def vec2xr(param, idvals, namevals, v1, v2, v3, v4, v5, v6, GMpl=None, Rpl=None,
     bodyxr = bodyxr.assign_coords(name=('id', namevals))
     ds = bodyxr.to_dataset(dim='vec')
+    ds = ds.reset_coords(names=["name"])
     return ds
\ No newline at end of file
diff --git a/python/swiftest/swiftest/io.py b/python/swiftest/swiftest/io.py
index 5cd294246..f4bde632d 100644
--- a/python/swiftest/swiftest/io.py
+++ b/python/swiftest/swiftest/io.py
@@ -707,7 +707,7 @@ def swiftest2xr(param, verbose=True):
     elif ((param['OUT_TYPE'] == 'NETCDF_DOUBLE') or (param['OUT_TYPE'] == 'NETCDF_FLOAT')):
         if verbose: print('\nCreating Dataset from NetCDF file')
         ds = xr.open_dataset(param['BIN_OUT'], mask_and_scale=False)
-        ds = clean_string_values(param, ds)
+        ds = clean_string_values(ds)
     else:
         print(f"Error encountered. OUT_TYPE {param['OUT_TYPE']} not recognized.")
         return None
@@ -726,13 +726,12 @@ def string_converter(da):
     da = xstrip(da)
     return da
 
-def clean_string_values(param, ds):
+def clean_string_values(ds):
     """
     Cleans up the string values in the DataSet that have artifacts as a result of coming from NetCDF Fortran
 
     Parameters
     ----------
-    param : dict
     ds : xarray dataset
 
    Returns
@@ -746,6 +745,25 @@ def clean_string_values(param, ds):
 
     return ds
 
+def unclean_string_values(ds):
+    """
+    Converts strings back to a format readable by NetCDF Fortran
+
+    Parameters
+    ----------
+    ds : xarray dataset
+
+    Returns
+    -------
+    ds : xarray dataset with the strings converted back to byte form
+    """
+
+    for c in string_varnames:
+        n = string_converter(ds[c])
+        ds[c] = n.str.ljust(32).str.encode('utf-8')
+    return ds
+
+
 def swiftest_particle_stream(f):
     """
     Reads in a Swiftest particle.dat file and returns a single frame of particle data as a datastream
@@ -806,26 +824,25 @@ def swiftest_particle_2xr(param):
     return infoxr
 
 
-#def convert_to_fortran_readable(ds):
-
-def swiftest_xr2infile(ds, param, framenum=-1):
+def select_active_from_frame(ds, param, framenum=-1):
     """
-    Writes a set of Swiftest input files from a single frame of a Swiftest xarray dataset
+    Selects a particular frame from a DataSet and returns only the active particles in that frame
 
     Parameters
     ----------
     ds : xarray dataset
-        Dataset containing Swiftest n-body data in XV format
-    framenum : int
-        Time frame to use to generate the initial conditions. If this argument is not passed, the default is to use the last frame in the dataset.
     param : dict
         Swiftest input parameters. This method uses the names of the cb, pl, and tp files from the input
+    framenum : int (default=-1)
+        Time frame to extract. If this argument is not passed, the default is to use the last frame in the dataset.
 
     Returns
     -------
-    A set of three input files for a Swiftest run
+    Dataset containing only the active particles at frame framenum
     """
+
     # Select the input time frame from the Dataset
     frame = ds.isel(time=[framenum])
 
@@ -842,7 +859,7 @@ def is_not_nan(x):
         return np.invert(np.isnan(x))
 
     # Remove the inactive particles
-    if param['OUT_FORM'] == 'XV':
+    if param['OUT_FORM'] == 'XV' or param['OUT_FORM'] == 'XVEL':
         active_masker = xr.apply_ufunc(is_not_nan, frame_both['xhx'].isel(time=0))
     else:
         active_masker = xr.apply_ufunc(is_not_nan, frame_both['a'].isel(time=0))
@@ -855,21 +872,41 @@ def is_not_nan(x):
     frame = frame_both.combine_first(frame_time_only).combine_first(frame_id_only)
     frame = frame.transpose("time", "id")
 
+    return frame
+
+def swiftest_xr2infile(ds, param, framenum=-1):
+    """
+    Writes a set of Swiftest input files from a single frame of a Swiftest xarray dataset
+
+    Parameters
+    ----------
+    ds : xarray dataset
+        Dataset containing Swiftest n-body data in XV format
+    param : dict
+        Swiftest input parameters. This method uses the names of the cb, pl, and tp files from the input
+    framenum : int (default=-1)
+        Time frame to use to generate the initial conditions. If this argument is not passed, the default is to use the last frame in the dataset.
+
+    Returns
+    -------
+    A set of input files for a new Swiftest run
+    """
+
+    frame = select_active_from_frame(ds, param, framenum)
+
     if param['IN_TYPE'] == "NETCDF_DOUBLE" or param['IN_TYPE'] == "NETCDF_FLOAT":
         # Convert strings back to byte form and save the NetCDF file
         # Note: xarray will call the character array dimension string32. The Fortran code
         # will rename this after reading
-        for c in string_varnames:
-            n = string_converter(frame[c])
-            frame[c] = n.str.ljust(32).str.encode('utf-8')
+        frame = unclean_string_values(frame)
         frame.to_netcdf(path=param['NC_IN'])
         return
 
     # All other file types need seperate files for each of the inputs
     cb = frame.where(frame.id == 0, drop=True)
     pl = frame.where(frame.id > 0, drop=True)
-    pl = pl.where(np.invert(np.isnan(pl['Gmass'])), drop=True).drop_vars(['J_2', 'J_4'])
-    tp = frame.where(np.isnan(frame['Gmass']), drop=True).drop_vars(['Gmass', 'radius', 'J_2', 'J_4'])
+    pl = pl.where(np.invert(np.isnan(pl['Gmass'])), drop=True).drop_vars(['J_2', 'J_4'], errors="ignore")
+    tp = frame.where(np.isnan(frame['Gmass']), drop=True).drop_vars(['Gmass', 'radius', 'J_2', 'J_4'], errors="ignore")
 
     GMSun = np.double(cb['Gmass'])
     if param['CHK_CLOSE'] == 'YES':
diff --git a/python/swiftest/swiftest/simulation_class.py b/python/swiftest/swiftest/simulation_class.py
index 6a5f1728c..8242f8088 100644
--- a/python/swiftest/swiftest/simulation_class.py
+++ b/python/swiftest/swiftest/simulation_class.py
@@ -85,7 +85,7 @@ def add(self, plname, date=date.today().isoformat(), idval=None):
         return
 
-    def addp(self, idvals, namevals, t1, t2, t3, t4, t5, t6, GMpl=None, Rpl=None, rhill=None, Ip1=None, Ip2=None, Ip3=None, rotx=None, roty=None, rotz=None):
+    def addp(self, idvals, namevals, t1, t2, t3, t4, t5, t6, GMpl=None, Rpl=None, rhill=None, Ip1=None, Ip2=None, Ip3=None, rotx=None, roty=None, rotz=None, t=None):
         """
         Adds a body (test particle or massive body) to the internal DataSet given a set up 6 vectors (orbital elements
         or cartesian state vectors, depending on the value of self.param).
         Input all angles in degress
@@ -108,11 +108,15 @@ def addp(self, idvals, namevals, t1, t2, t3, t4, t5, t6, GMpl=None, Rpl=None, rh
         -------
         self.ds : xarray dataset
         """
-        t = self.param['T0']
+        if t is None:
+            t = self.param['T0']
 
         dsnew = init_cond.vec2xr(self.param, idvals, namevals, t1, t2, t3, t4, t5, t6, GMpl, Rpl, rhill, Ip1, Ip2, Ip3, rotx, roty, rotz, t)
 
         if dsnew is not None:
             self.ds = xr.combine_by_coords([self.ds, dsnew])
+        self.ds['ntp'] = self.ds['id'].where(np.isnan(self.ds['Gmass'])).count(dim="id")
+        self.ds['npl'] = self.ds['id'].where(np.invert(np.isnan(self.ds['Gmass']))).count(dim="id") - 1
+
         return
 
diff --git a/src/netcdf/netcdf.f90 b/src/netcdf/netcdf.f90
index 615827455..070f5f5c0 100644
--- a/src/netcdf/netcdf.f90
+++ b/src/netcdf/netcdf.f90
@@ -395,7 +395,8 @@ module subroutine netcdf_open(self, param, readonly)
       call check( nf90_inq_dimid(self%ncid, ID_DIMNAME, self%id_dimid) )
       call check( nf90_inquire_dimension(self%ncid, max(self%time_dimid,self%id_dimid)+1, name=str_dim_name) )
       call check( nf90_inq_dimid(self%ncid, str_dim_name, self%str_dimid) )
-      if (str_dim_name /= "str") call check( nf90_rename_dim(self%ncid, STR_DIMNAME, self%str_dimid ) )
+      write(str_dim_name,*) STR_DIMNAME
+      call check( nf90_rename_dim(self%ncid, self%str_dimid, str_dim_name) )
 
       call check( nf90_inq_varid(self%ncid, TIME_DIMNAME, self%time_varid))
       call check( nf90_inq_varid(self%ncid, ID_DIMNAME, self%id_varid))
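Usage reference (not part of the patch): a minimal sketch of how the refactored io routines above fit together, assuming the installed swiftest package exposes the io module edited here; the file names and parameter values are illustrative, and a real param dict carries many more keys than the ones these routines read.

from swiftest import io as swio

# Illustrative subset of a Swiftest parameter dict; only the keys read by the
# routines touched in this patch are shown (values here are placeholders).
param = {
    'OUT_TYPE': 'NETCDF_DOUBLE',   # swiftest2xr reads a NetCDF output file
    'BIN_OUT' : 'bin.nc',          # path to the existing output file
    'OUT_FORM': 'XVEL',            # now accepted alongside 'XV' when masking inactive bodies
    'IN_TYPE' : 'NETCDF_DOUBLE',   # swiftest_xr2infile writes a NetCDF initial-conditions file
    'NC_IN'   : 'init_cond.nc',    # path of the new initial-conditions file
}

# Load a previous run's output; string variables are cleaned by clean_string_values()
ds = swio.swiftest2xr(param, verbose=False)

# New helper split out of swiftest_xr2infile(): keep only the active bodies in one frame
frame = swio.select_active_from_frame(ds, param, framenum=-1)

# Write initial conditions for a follow-on run; for NetCDF input types the strings
# are re-encoded via unclean_string_values() before frame.to_netcdf() is called
swio.swiftest_xr2infile(ds, param, framenum=-1)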