This repository was archived by the owner on Aug 28, 2024. It is now read-only.

Improved array handling and tolerance
daminton committed May 14, 2021
1 parent 4041b4d commit e8a8d77
Showing 3 changed files with 7 additions and 6 deletions.

src/symba/symba_frag_pos.f90 (2 changes: 1 addition & 1 deletion)
```diff
@@ -347,7 +347,7 @@ subroutine symba_frag_pos_kinetic_energy(m_frag, ke_target, L_target, x_frag, v_
    ! Internals
    real(DP) :: mtot !! Total mass of fragments
    integer(I4B) :: i, nfrag, neval
-   real(DP), parameter :: TOL = 1e-9_DP
+   real(DP), parameter :: TOL = 1e-12_DP
    real(DP), dimension(:), allocatable :: vflat
    logical :: lerr

```
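
Read together with the next file, TOL appears to feed util_minimize_bfgs as its convergence tolerance eps. Tightening it from 1e-9_DP to 1e-12_DP is only safe once the minimizer stops reusing eps as its finite-difference step, because in double precision a differencing step near 1e-12 is dominated by round-off. A standalone illustration (not repository code) of why the step size cannot follow the convergence tolerance downward:

```fortran
program stepsize_demo
   implicit none
   integer, parameter :: DP = kind(1.0d0)
   real(DP) :: x, h, approx, exact
   integer :: k

   ! Forward-difference estimate of d/dx sin(x) at x = 1. Truncation error
   ! shrinks with h, but round-off error grows as h approaches machine
   ! precision, so a step near 1e-12 is far less accurate than one near 1e-8.
   x = 1.0_DP
   exact = cos(x)
   do k = 4, 12, 4
      h = 10.0_DP**(-k)
      approx = (sin(x + h) - sin(x)) / h
      print '(a,es10.1,a,es12.4)', ' h =', h, '   |error| =', abs(approx - exact)
   end do
end program stepsize_demo
```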

src/util/util_minimize_bfgs.f90 (5 changes: 3 additions & 2 deletions)
```diff
@@ -28,6 +28,7 @@ function util_minimize_bfgs(f, N, x0, eps, lerr) result(x1)
    ! Internals
    integer(I4B) :: i, j, k, l, conv, num, fnum
    integer(I4B), parameter :: MAXLOOP = 2000 !! Maximum number of loops before method is determined to have failed
+   real(DP), parameter :: gradeps = 1e-4_DP !! Tolerance for gradient calculations
    real(DP), dimension(N) :: S !! Direction vectors
    real(DP), dimension(N) :: Snorm !! normalized direction
    real(DP), dimension(N,N) :: H !! Approximated inverse Hessian matrix
@@ -43,7 +44,7 @@ function util_minimize_bfgs(f, N, x0, eps, lerr) result(x1)
    ! Initialize approximate Hessian with the identity matrix (i.e. begin with method of steepest descent)
    H(:,:) = reshape([((0._DP, i=1, j-1), 1._DP, (0._DP, i=j+1, N), j=1, N)], [N,N])
    ! Get initial gradient and initialize arrays for updated values of gradient and x
-   fnum = fnum + gradf(f, N, x0(:), grad0, eps)
+   fnum = fnum + gradf(f, N, x0(:), grad0, gradeps)
    allocate(x1, source=x0)
    grad1(:) = grad0(:)
    do i = 1, MAXLOOP
@@ -69,7 +70,7 @@ function util_minimize_bfgs(f, N, x0, eps, lerr) result(x1)
       x1(:) = x1(:) + P(:)
       ! Calculate new gradient
       grad0(:) = grad1(:)
-      fnum = fnum + gradf(f, N, x1, grad1, eps)
+      fnum = fnum + gradf(f, N, x1, grad1, gradeps)
       y(:) = grad1(:) - grad0(:)
       Py = sum(P(:) * y(:))
       ! set up factors for H matrix update
```
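
The new gradeps parameter decouples the finite-difference step passed to gradf from the caller-supplied convergence tolerance eps, which both call sites previously reused as the step size. gradf itself is not shown in this diff; the sketch below is a hypothetical forward-difference routine consistent with the call sites (the name gradf_sketch and its interface are assumptions), filling grad and returning the number of function evaluations that the caller accumulates into fnum:

```fortran
module gradf_sketch_mod
   implicit none
   integer, parameter :: DP = kind(1.0d0)
contains
   function gradf_sketch(f, N, x, grad, dx) result(neval)
      interface
         function f(x) result(y)
            import :: DP
            real(DP), dimension(:), intent(in) :: x
            real(DP) :: y
         end function f
      end interface
      integer, intent(in) :: N
      real(DP), dimension(N), intent(in) :: x
      real(DP), dimension(N), intent(out) :: grad
      real(DP), intent(in) :: dx
      integer :: neval
      real(DP), dimension(N) :: xp
      real(DP) :: f0
      integer :: i

      ! One base evaluation, then one perturbed evaluation per component.
      f0 = f(x)
      do i = 1, N
         xp = x
         xp(i) = xp(i) + dx
         grad(i) = (f(xp) - f0) / dx
      end do
      neval = N + 1
   end function gradf_sketch
end module gradf_sketch_mod
```

With a fixed step of about 1e-4, gradient accuracy no longer degrades when a caller requests a very tight eps, at the cost of gradients that are never more accurate than the fixed step allows.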

src/util/util_solve_linear_system.f90 (6 changes: 3 additions & 3 deletions)
```diff
@@ -38,9 +38,9 @@ function solve_wbs(u, lerr) result(x) ! solve with backward substitution
    integer(I4B) :: i,n
    real(DP), parameter :: epsilon = 10 * tiny(1._DP)

-   if (allocated(x)) deallocate(x)
-   allocate(x(n))
-
+   n = size(u, 1)
+   if (allocated(x) .and. (size(x) /= n)) deallocate(x)
+   if (.not.allocated(x)) allocate(x(n))
    lerr = any(abs(u(:,:)) < epsilon)
    if (lerr) then
       x(:) = 0._DP
```
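
The deleted lines allocated x(n) before n = size(u, 1) had executed, so the allocation read an undefined n; the replacement sizes the result first and reallocates only when an existing allocation has the wrong extent. A minimal standalone illustration of the corrected ordering (hypothetical program, not repository code); since Fortran does not guarantee short-circuit evaluation of .and., the sketch splits the allocated/size tests into nested ifs:

```fortran
program alloc_order_demo
   implicit none
   integer, parameter :: DP = kind(1.0d0)
   real(DP), dimension(:), allocatable :: x
   real(DP), dimension(3,4) :: u
   integer :: n

   u = 0.0_DP
   ! Size the result from the input *before* touching the allocation.
   n = size(u, 1)
   if (allocated(x)) then
      ! Reallocate only when an existing allocation has the wrong extent.
      if (size(x) /= n) deallocate(x)
   end if
   if (.not. allocated(x)) allocate(x(n))
   print *, size(x)   ! prints 3
end program alloc_order_demo
```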
