diff --git a/examples/Fragmentation/Fragmentation_Movie.py b/examples/Fragmentation/Fragmentation_Movie.py
index 4a0d3b80d..57f58e930 100644
--- a/examples/Fragmentation/Fragmentation_Movie.py
+++ b/examples/Fragmentation/Fragmentation_Movie.py
@@ -95,6 +95,8 @@ def encounter_combiner(sim):
     # Remove any encounter data at the same time steps that appear in the data to prevent duplicates
     t_not_duplicate = ~enc['time'].isin(data['time'])
     enc = enc.where(t_not_duplicate,drop=True)
+    tgood=enc.time.where(~np.isnan(enc.time),drop=True)
+    enc = enc.sel(time=tgood)
 
     # The following will combine the two datasets along the time dimension, sort the time dimension, and then fill in any time gaps with interpolation
     ds = xr.combine_nested([data,enc],concat_dim='time').sortby("time").interpolate_na(dim="time")
@@ -107,7 +109,6 @@ class AnimatedScatter(object):
 
     def __init__(self, sim, animfile, title, style, nskip=1):
         self.ds = encounter_combiner(sim)
-        nframes = int(self.ds['time'].size)
        self.sim = sim
        self.title = title
@@ -203,7 +204,7 @@ def data_stream(self, frame=0):
     minimum_fragment_gmass = 0.2 * body_Gmass[style][1] # Make the minimum fragment mass a fraction of the smallest body
     gmtiny = 0.99 * body_Gmass[style][1] # Make GMTINY just smaller than the smallest original body. This will prevent runaway collisional cascades
     sim.set_parameter(fragmentation=True, encounter_save="trajectory", gmtiny=gmtiny, minimum_fragment_gmass=minimum_fragment_gmass, verbose=False)
-    sim.run(dt=1e-4, tstop=1.0e-3, istep_out=1, dump_cadence=1)
+    sim.run(dt=1e-3, tstop=1.0e-3, istep_out=1, dump_cadence=1)
 
     print("Generating animation")
     anim = AnimatedScatter(sim,movie_filename,movie_titles[style],style,nskip=1)
\ No newline at end of file
diff --git a/python/swiftest/swiftest/io.py b/python/swiftest/swiftest/io.py
index c002978b9..ade7cac00 100644
--- a/python/swiftest/swiftest/io.py
+++ b/python/swiftest/swiftest/io.py
@@ -816,19 +816,12 @@ def process_netcdf_input(ds, param):
     ds : xarray dataset
     """
     #
-    #ds = ds.where(ds.id >=0,drop=True)
+
     if param['OUT_TYPE'] == "NETCDF_DOUBLE":
         ds = fix_types(ds,ftype=np.float64)
     elif param['OUT_TYPE'] == "NETCDF_FLOAT":
         ds = fix_types(ds,ftype=np.float32)
-    #
-    # Check if the name variable contains unique values. If so, make name the dimension instead of id
-    # if "id" in ds.dims:
-    #     if len(np.unique(ds['name'])) == len(ds['name']):
-    #         ds = ds.swap_dims({"id" : "name"})
-    # if "id" in ds:
-    #     ds = ds.reset_coords("id")
-
     return ds
 
 
 def swiftest2xr(param, verbose=True):
diff --git a/src/symba/symba_util.f90 b/src/symba/symba_util.f90
index 06d75bac8..157c3f5af 100644
--- a/src/symba/symba_util.f90
+++ b/src/symba/symba_util.f90
@@ -683,7 +683,7 @@ module subroutine symba_util_rearray_pl(self, system, param)
          allocate(levelg_orig_pl, source=pl%levelg)
          allocate(levelm_orig_pl, source=pl%levelm)
          allocate(nplenc_orig_pl, source=pl%nplenc)
-         lencounter = pl%encounter_check(param, system, param%dt, 0)
+         lencounter = pl%encounter_check(param, system, param%dt, system%irec)
          if (system%tp%nbody > 0) then
             select type(tp => system%tp)
             class is (symba_tp)
@@ -691,7 +691,7 @@ module subroutine symba_util_rearray_pl(self, system, param)
                allocate(levelg_orig_tp, source=tp%levelg)
                allocate(levelm_orig_tp, source=tp%levelm)
                allocate(nplenc_orig_tp, source=tp%nplenc)
-               lencounter = tp%encounter_check(param, system, param%dt, 0)
+               lencounter = tp%encounter_check(param, system, param%dt, system%irec)
                call move_alloc(levelg_orig_tp, tp%levelg)
                call move_alloc(levelm_orig_tp, tp%levelm)
                call move_alloc(nplenc_orig_tp, tp%nplenc)
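
For reference, the tgood filter added to encounter_combiner above only keeps the finite entries of the encounter time coordinate before the saved data and the encounter trajectories are merged along time. A minimal, self-contained sketch of that xarray pattern follows; the toy data and enc datasets are hypothetical stand-ins and do not reproduce Swiftest's actual output structure:

import numpy as np
import xarray as xr

# Hypothetical stand-ins for the main output dataset and the encounter dataset
data = xr.Dataset({"x": ("time", [0.0, 1.0, 2.0])}, coords={"time": [0.0, 1.0, 2.0]})
enc = xr.Dataset({"x": ("time", [1.5, 2.5])}, coords={"time": [0.5, 1.5]})

# Drop encounter entries whose time already appears in the main dataset
t_not_duplicate = ~enc['time'].isin(data['time'])
enc = enc.where(t_not_duplicate, drop=True)

# Keep only finite time coordinates, mirroring the tgood lines in the patch;
# NaN times left behind by where() would otherwise pollute the combined time axis
tgood = enc.time.where(~np.isnan(enc.time), drop=True)
enc = enc.sel(time=tgood)

# Combine along time, sort, and interpolate across any gaps
ds = xr.combine_nested([data, enc], concat_dim='time').sortby("time").interpolate_na(dim="time")
print(ds)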