From 34105afde5125a47ea37afa55ece6b0a745288c9 Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Tue, 24 Oct 2023 16:28:26 +0100 Subject: [PATCH 01/16] first pass at making live point generation deterministic --- run_pypolychord.py | 1 + src/polychord/generate.F90 | 88 ++++++++++++++++++------------------- src/polychord/mpi_utils.F90 | 55 +++++++++++++++++++++++ 3 files changed, 100 insertions(+), 44 deletions(-) diff --git a/run_pypolychord.py b/run_pypolychord.py index 8f739315..2333d3d3 100755 --- a/run_pypolychord.py +++ b/run_pypolychord.py @@ -45,6 +45,7 @@ def dumper(live, dead, logweights, logZ, logZerr): settings.nlive = 200 settings.do_clustering = True settings.read_resume = False +settings.seed = 1 #| Run PolyChord diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 8adf0fb3..e5ef7584 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -69,7 +69,7 @@ subroutine GenerateLivePoints(loglikelihood,prior,settings,RTI,mpi_information) use array_module, only: add_point use abort_module #ifdef MPI - use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points + use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points,scatter_points,gather_points #else use mpi_module, only: mpi_bundle,is_root,linear_mode #endif @@ -106,6 +106,7 @@ function prior(cube) result(theta) #endif real(dp), dimension(settings%nTotal) :: live_point ! Temporary live point array + real(dp), dimension(settings%nTotal*mpi_information%nprocs) :: live_points ! Temporary live point array for generation character(len=fmt_len) :: fmt_dbl ! writing format variable @@ -116,6 +117,7 @@ function prior(cube) result(theta) real(dp) :: time0,time1,total_time real(dp),dimension(size(settings%grade_dims)) :: speed + integer :: i ! Initialise number of likelihood calls to zero here nlike = 0 @@ -185,64 +187,61 @@ function prior(cube) result(theta) else !===================== PARALLEL MODE ======================= - if(is_root(mpi_information)) then - ! The root node just recieves data from all other processors - - - active_workers=mpi_information%nprocs-1 ! Set the number of active processors to the number of workers + do while(.true.) + if(is_root(mpi_information)) then + ! root generates random numbers and scatters them to the workers + if (RTI%nlive(1)0) - ! Recieve a point from any worker - worker_id = catch_point(live_point,mpi_information) + call scatter_points(live_points,live_point,mpi_information,settings%nTotal) - ! If its valid, add it to the array - if(live_point(settings%l0)>settings%logzero) then + ! if live points have been set to -1 then exit loop + if (any(live_point<0)) exit - call add_point(live_point,RTI%live,RTI%nlive,1) ! Add this point to the array + time0 = time() + call calculate_point( loglikelihood, prior, live_point, settings,nlike) ! Compute physical coordinates, likelihoods and derived parameters + ndiscarded=ndiscarded+1 + time1 = time() + live_point(settings%b0) = settings%logzero + if(live_point(settings%l0)>settings%logzero) total_time = total_time + time1-time0 - !-------------------------------------------------------------------------------! - call write_generating_live_points(settings%feedback,RTI%nlive(1),nprior) - !-------------------------------------------------------------------------------! - if(settings%write_live) then - ! 
Write the live points to the live_points file - write(write_phys_unit,fmt_dbl) live_point(settings%p0:settings%d1), live_point(settings%l0) - flush(write_phys_unit) ! flush the unit to force write - end if - - end if + call gather_points(live_points,live_point,mpi_information,settings%nTotal) + ! Recieve a point from any worker + if (is_root(mpi_information)) then + do i=1, mpi_information%nprocs * settings%nTotal, settings%nTotal + if (RTI%nlive(1)>=nprior) exit ! exit loop if enough points have been generated - if(RTI%nlive(1)settings%logzero) then + call add_point(live_point,RTI%live,RTI%nlive,1) ! Add this point to the array + !-------------------------------------------------------------------------------! + call write_generating_live_points(settings%feedback,RTI%nlive(1),nprior) + !-------------------------------------------------------------------------------! + if(settings%write_live) then + ! Write the live points to the live_points file + write(write_phys_unit,fmt_dbl) live_point(settings%p0:settings%d1), live_point(settings%l0) + flush(write_phys_unit) ! flush the unit to force write + end if + end if + end do - else + end if - ! The workers simply generate and send points until they're told to stop by the administrator - do while(.true.) - - live_point(settings%h0:settings%h1) = random_reals(settings%nDims) ! Generate a random hypercube coordinate - time0 = time() - call calculate_point( loglikelihood, prior, live_point, settings,nlike) ! Compute physical coordinates, likelihoods and derived parameters - ndiscarded=ndiscarded+1 - time1 = time() - live_point(settings%b0) = settings%logzero - if(live_point(settings%l0)>settings%logzero) total_time = total_time + time1-time0 - call throw_point(live_point,mpi_information) ! Send it to the root node - if(.not. more_points_needed(mpi_information)) exit ! If we've recieved a kill signal, then exit this loop - end do - end if + end do #endif end if !(nprocs case) @@ -607,6 +606,7 @@ function prior(cube) result(theta) ! The workers simply generate and send points until they're told to stop by the administrator live_point = settings%seed_point + live_point = 0 do while(.true.) do i_repeat = 1,settings%nprior_repeat do i_dim=1,settings%nDims diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index 9a891b2a..9257dfae 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -368,6 +368,61 @@ end subroutine throw_point + !============= Generating live points ================= + !> + !! + !! This a process by which the worker node 'catches' thrown points + !! from the administrator + + !> Administrator throws points to all workers + !! + !! This a process by which a worker node 'throws' a point to the root + + subroutine scatter_points(live_points,live_point,mpi_information,nTotal) + implicit none + + real(dp),intent(in),dimension(:) :: live_points !> live point to throw + real(dp),intent(out),dimension(:) :: live_point !> live point to catch + type(mpi_bundle), intent(in) :: mpi_information + integer, intent(in) :: nTotal + + call MPI_SCATTER( &! + live_points, &! + nTotal, &! + MPI_DOUBLE_PRECISION, &! + live_point, &! + nTotal, &! + MPI_DOUBLE_PRECISION, &! + mpi_information%root, &! + mpi_information%communicator,&! + mpierror &! 
+ ) + + end subroutine scatter_points + + + + subroutine gather_points(live_points,live_point,mpi_information,nTotal) + implicit none + + real(dp),intent(in),dimension(:) :: live_point !> live point to throw + real(dp),intent(out),dimension(:) :: live_points !> live points to catch + type(mpi_bundle), intent(in) :: mpi_information + integer, intent(in) :: nTotal + + call MPI_GATHER( &! + live_point, &! + nTotal, &! + MPI_DOUBLE_PRECISION, &! + live_points, &! + nTotal, &! + MPI_DOUBLE_PRECISION, &! + mpi_information%root, &! + mpi_information%communicator,&! + mpierror &! + ) + + end subroutine gather_points From f56c4c9c9e5ecb327e90d747d5957e63a652f58b Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Tue, 24 Oct 2023 16:41:26 +0100 Subject: [PATCH 02/16] tidy comments --- src/polychord/mpi_utils.F90 | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index 9257dfae..67058906 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -371,17 +371,17 @@ end subroutine throw_point !============= Generating live points ================= !> !! - !! This a process by which the worker node 'catches' thrown points - !! from the administrator + !! This a process by which the administrator 'scatters' live points + !! to all workers. - !> Administrator throws points to all workers + !> Administrator scatters live points to all workers. !! !! This a process by which a worker node 'throws' a point to the root subroutine scatter_points(live_points,live_point,mpi_information,nTotal) implicit none - real(dp),intent(in),dimension(:) :: live_points !> live point to throw + real(dp),intent(in),dimension(:) :: live_points !> live points to throw real(dp),intent(out),dimension(:) :: live_point !> live point to catch type(mpi_bundle), intent(in) :: mpi_information integer, intent(in) :: nTotal @@ -401,11 +401,16 @@ subroutine scatter_points(live_points,live_point,mpi_information,nTotal) end subroutine scatter_points + !> Administrator gathers live points from all workers. + !! + !! This a process by which the administrator node 'gathers' + !! all points to the root. 
+ subroutine gather_points(live_points,live_point,mpi_information,nTotal) implicit none - real(dp),intent(in),dimension(:) :: live_point !> live point to throw + real(dp),intent(in),dimension(:) :: live_point !> live point to throw real(dp),intent(out),dimension(:) :: live_points !> live points to catch type(mpi_bundle), intent(in) :: mpi_information integer, intent(in) :: nTotal From abe571461b03c6a9118209c59ce5e013e9ac20ba Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Tue, 24 Oct 2023 16:41:39 +0100 Subject: [PATCH 03/16] remove testing change for run_pypolychord.py --- run_pypolychord.py | 1 - 1 file changed, 1 deletion(-) diff --git a/run_pypolychord.py b/run_pypolychord.py index 2333d3d3..8f739315 100755 --- a/run_pypolychord.py +++ b/run_pypolychord.py @@ -45,7 +45,6 @@ def dumper(live, dead, logweights, logZ, logZerr): settings.nlive = 200 settings.do_clustering = True settings.read_resume = False -settings.seed = 1 #| Run PolyChord From 74bb5ca99daadcacd7aaca374b92801265db5bed Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Tue, 24 Oct 2023 16:44:32 +0100 Subject: [PATCH 04/16] rename i to live_point_index --- src/polychord/generate.F90 | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index e5ef7584..80440d97 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -117,7 +117,7 @@ function prior(cube) result(theta) real(dp) :: time0,time1,total_time real(dp),dimension(size(settings%grade_dims)) :: speed - integer :: i + integer :: live_point_index ! Start index of the live point in the live_points array. ! Initialise number of likelihood calls to zero here nlike = 0 @@ -216,10 +216,10 @@ function prior(cube) result(theta) ! Recieve a point from any worker if (is_root(mpi_information)) then - do i=1, mpi_information%nprocs * settings%nTotal, settings%nTotal + do live_point_index=1, mpi_information%nprocs * settings%nTotal, settings%nTotal if (RTI%nlive(1)>=nprior) exit ! exit loop if enough points have been generated - live_point=live_points(i:i+settings%nTotal-1) + live_point=live_points(live_point_index:live_point_index+settings%nTotal-1) ! If its valid, add it to the array if(live_point(settings%l0)>settings%logzero) then From dcdaa1a26bf1f21d9ed4eb17ff906ad8c94497db Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Tue, 24 Oct 2023 17:26:32 +0100 Subject: [PATCH 05/16] bring mpi docstrings more in to line --- src/polychord/mpi_utils.F90 | 15 ++++++++++----- 1 file changed, 10 insertions(+), 5 deletions(-) diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index 67058906..b8ee83eb 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -368,11 +368,16 @@ end subroutine throw_point - !============= Generating live points ================= - !> - !! - !! This a process by which the administrator 'scatters' live points - !! to all workers. + !============== Scattering/gathering live points ==================== + ! This a process by which the administrator 'scatters' live points + ! to all workers, and gathers them back again. + ! + ! This is used in the initial generation of live points. + ! scatter_points: + ! root ----> all workers + ! + ! gather_points: + ! all workers ----> root !> Administrator scatters live points to all workers. !! 
From fcaf9f3afebc69eb57c215b3dc4fd2e4a44d01b8 Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Wed, 25 Oct 2023 01:04:36 +0100 Subject: [PATCH 06/16] remembered that nTotal isn't needed (hangover from another idea) --- src/polychord/generate.F90 | 4 ++-- src/polychord/mpi_utils.F90 | 14 ++++++-------- 2 files changed, 8 insertions(+), 10 deletions(-) diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 80440d97..78fbed26 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -199,7 +199,7 @@ function prior(cube) result(theta) end if - call scatter_points(live_points,live_point,mpi_information,settings%nTotal) + call scatter_points(live_points,live_point,mpi_information) ! if live points have been set to -1 then exit loop if (any(live_point<0)) exit @@ -212,7 +212,7 @@ function prior(cube) result(theta) if(live_point(settings%l0)>settings%logzero) total_time = total_time + time1-time0 - call gather_points(live_points,live_point,mpi_information,settings%nTotal) + call gather_points(live_points,live_point,mpi_information) ! Recieve a point from any worker if (is_root(mpi_information)) then diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index b8ee83eb..2100e3ad 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -383,20 +383,19 @@ end subroutine throw_point !! !! This a process by which a worker node 'throws' a point to the root - subroutine scatter_points(live_points,live_point,mpi_information,nTotal) + subroutine scatter_points(live_points,live_point,mpi_information) implicit none real(dp),intent(in),dimension(:) :: live_points !> live points to throw real(dp),intent(out),dimension(:) :: live_point !> live point to catch type(mpi_bundle), intent(in) :: mpi_information - integer, intent(in) :: nTotal call MPI_SCATTER( &! live_points, &! - nTotal, &! + size(live_point), &! MPI_DOUBLE_PRECISION, &! live_point, &! - nTotal, &! + size(live_point), &! MPI_DOUBLE_PRECISION, &! mpi_information%root, &! mpi_information%communicator,&! @@ -412,20 +411,19 @@ end subroutine scatter_points !! all points to the root. - subroutine gather_points(live_points,live_point,mpi_information,nTotal) + subroutine gather_points(live_points,live_point,mpi_information) implicit none real(dp),intent(in),dimension(:) :: live_point !> live point to throw real(dp),intent(out),dimension(:) :: live_points !> live points to catch type(mpi_bundle), intent(in) :: mpi_information - integer, intent(in) :: nTotal call MPI_GATHER( &! live_point, &! - nTotal, &! + size(live_point), &! MPI_DOUBLE_PRECISION, &! live_points, &! - nTotal, &! + size(live_point), &! MPI_DOUBLE_PRECISION, &! mpi_information%root, &! mpi_information%communicator,&! 
From 05ab023f38d04c13b9b9549fd317a6b438f61a0b Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Wed, 25 Oct 2023 01:08:33 +0100 Subject: [PATCH 07/16] remove unused imports --- src/polychord/generate.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 78fbed26..092dd3b7 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -69,7 +69,7 @@ subroutine GenerateLivePoints(loglikelihood,prior,settings,RTI,mpi_information) use array_module, only: add_point use abort_module #ifdef MPI - use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points,scatter_points,gather_points + use mpi_module, only: mpi_bundle,is_root,linear_mode,sum_integers,sum_doubles,scatter_points,gather_points #else use mpi_module, only: mpi_bundle,is_root,linear_mode #endif From baad0e937f9db2c4078ec8e241df54e409587181 Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Wed, 25 Oct 2023 15:02:06 +0100 Subject: [PATCH 08/16] version bump --- README.rst | 2 +- pypolychord/__init__.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.rst b/README.rst index 0c154185..156ca1c2 100644 --- a/README.rst +++ b/README.rst @@ -5,7 +5,7 @@ :target: https://arxiv.org/abs/1506.00171 :alt: Open-access paper -PolyChord v 1.20.2 +PolyChord v 1.21.0 Will Handley, Mike Hobson & Anthony Lasenby diff --git a/pypolychord/__init__.py b/pypolychord/__init__.py index 16f2deef..7c6e3ddb 100644 --- a/pypolychord/__init__.py +++ b/pypolychord/__init__.py @@ -1,3 +1,3 @@ -__version__ = "1.20.2" +__version__ = "1.21.0" from pypolychord.settings import PolyChordSettings from pypolychord.polychord import run_polychord From 5e878f560e50c9f1f66eb403381c33738c651f90 Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Wed, 25 Oct 2023 15:05:55 +0100 Subject: [PATCH 09/16] complete version bump --- src/polychord/feedback.f90 | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/polychord/feedback.f90 b/src/polychord/feedback.f90 index 42d4fc0c..30f494ae 100644 --- a/src/polychord/feedback.f90 +++ b/src/polychord/feedback.f90 @@ -28,8 +28,8 @@ subroutine write_opening_statement(settings) write(stdout_unit,'("")') write(stdout_unit,'("PolyChord: Next Generation Nested Sampling")') write(stdout_unit,'("copyright: Will Handley, Mike Hobson & Anthony Lasenby")') - write(stdout_unit,'(" version: 1.20.2")') - write(stdout_unit,'(" release: 1st June 2021")') + write(stdout_unit,'(" version: 1.21.0")') + write(stdout_unit,'(" release: 25th October 2023")') write(stdout_unit,'(" email: wh260@mrao.cam.ac.uk")') write(stdout_unit,'("")') end if From 08a111054290af2bae31634ddecaa28266a1b1ef Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Wed, 22 Nov 2023 14:51:57 +0000 Subject: [PATCH 10/16] use point_needed() and request_this_point to offload all random number generation to the root process --- src/polychord/generate.F90 | 16 +++++---- src/polychord/mpi_utils.F90 | 69 +++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+), 6 deletions(-) diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 8adf0fb3..2c83dc63 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -69,7 +69,7 @@ subroutine GenerateLivePoints(loglikelihood,prior,settings,RTI,mpi_information) use array_module, only: add_point use abort_module #ifdef MPI - use mpi_module, only: 
mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points + use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points,request_this_point,point_needed #else use mpi_module, only: mpi_bundle,is_root,linear_mode #endif @@ -190,6 +190,11 @@ function prior(cube) result(theta) active_workers=mpi_information%nprocs-1 ! Set the number of active processors to the number of workers + do worker_id=1,active_workers + ! Request a point from any worker + live_point(settings%h0:settings%h1) = random_reals(settings%nDims) ! Generate a random hypercube coordinate + call request_this_point(live_point,mpi_information,worker_id) + end do do while(active_workers>0) @@ -215,7 +220,8 @@ function prior(cube) result(theta) if(RTI%nlive(1)settings%logzero) total_time = total_time + time1-time0 call throw_point(live_point,mpi_information) ! Send it to the root node - if(.not. more_points_needed(mpi_information)) exit ! If we've recieved a kill signal, then exit this loop end do end if diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index 9a891b2a..5a7d1bad 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -693,6 +693,75 @@ function more_points_needed(mpi_information) end function more_points_needed + + !============== New messaging routines =========================== + ! Fix this later! + ! During initial live point generation, the administrator needs to signal to the workers + ! whether or not to keep generating live points, or whether to stop + ! + ! administrator ----> worker + ! request_point more_points_needed -> true + ! no_more_points more_points_needed -> false + ! + + !> Request point + !! + !! This subroutine is used by the root node to request a new live point + subroutine request_this_point(live_point,mpi_information,worker_id) + implicit none + type(mpi_bundle), intent(in) :: mpi_information + integer, intent(in) :: worker_id !> Worker to request a new point from + real(dp), intent(in), dimension(:) :: live_point !> The live point to be sent + + + call MPI_SEND( & + live_point, &! not sending anything + size(live_point), &! + MPI_DOUBLE_PRECISION, &! sending doubles + worker_id, &! process id to send to + tag_gen_request, &! continuation tag + mpi_information%communicator,&! mpi handle + mpierror &! error flag + ) + + end subroutine request_this_point + + !> See if more points are needed + !! + !! This subroutine is used by the root node to request a new live point + function point_needed(live_point,mpi_information) + use abort_module + implicit none + type(mpi_bundle), intent(in) :: mpi_information + real(dp),intent(out),dimension(:) :: live_point !> live point to throw + + integer, dimension(MPI_STATUS_SIZE) :: mpistatus ! status identifier + + logical :: point_needed !> Whether we need more points or not + + call MPI_RECV( &! + live_point, &! + size(live_point), &! + MPI_DOUBLE_PRECISION, &! + mpi_information%root, &! + MPI_ANY_TAG, &! + mpi_information%communicator,&! + mpistatus, &! + mpierror &! + ) + + ! If we've recieved a kill signal, then exit this loop + if(mpistatus(MPI_TAG) == tag_gen_stop ) then + point_needed = .false. + else if(mpistatus(MPI_TAG) == tag_gen_request) then + point_needed = .true. 
+ else + call halt_program('generate error: unrecognised tag') + end if + + end function point_needed + + #endif From ac84ecfeef34027a0fd2b992af93b62cc8e3d50b Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Wed, 22 Nov 2023 15:17:55 +0000 Subject: [PATCH 11/16] Revert changes up until now --- README.rst | 2 +- pypolychord/__init__.py | 2 +- src/polychord/feedback.f90 | 4 +- src/polychord/generate.F90 | 88 ++++++++++++++++++------------------- src/polychord/mpi_utils.F90 | 63 -------------------------- 5 files changed, 48 insertions(+), 111 deletions(-) diff --git a/README.rst b/README.rst index 156ca1c2..0c154185 100644 --- a/README.rst +++ b/README.rst @@ -5,7 +5,7 @@ :target: https://arxiv.org/abs/1506.00171 :alt: Open-access paper -PolyChord v 1.21.0 +PolyChord v 1.20.2 Will Handley, Mike Hobson & Anthony Lasenby diff --git a/pypolychord/__init__.py b/pypolychord/__init__.py index 7c6e3ddb..16f2deef 100644 --- a/pypolychord/__init__.py +++ b/pypolychord/__init__.py @@ -1,3 +1,3 @@ -__version__ = "1.21.0" +__version__ = "1.20.2" from pypolychord.settings import PolyChordSettings from pypolychord.polychord import run_polychord diff --git a/src/polychord/feedback.f90 b/src/polychord/feedback.f90 index 30f494ae..42d4fc0c 100644 --- a/src/polychord/feedback.f90 +++ b/src/polychord/feedback.f90 @@ -28,8 +28,8 @@ subroutine write_opening_statement(settings) write(stdout_unit,'("")') write(stdout_unit,'("PolyChord: Next Generation Nested Sampling")') write(stdout_unit,'("copyright: Will Handley, Mike Hobson & Anthony Lasenby")') - write(stdout_unit,'(" version: 1.21.0")') - write(stdout_unit,'(" release: 25th October 2023")') + write(stdout_unit,'(" version: 1.20.2")') + write(stdout_unit,'(" release: 1st June 2021")') write(stdout_unit,'(" email: wh260@mrao.cam.ac.uk")') write(stdout_unit,'("")') end if diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 092dd3b7..8adf0fb3 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -69,7 +69,7 @@ subroutine GenerateLivePoints(loglikelihood,prior,settings,RTI,mpi_information) use array_module, only: add_point use abort_module #ifdef MPI - use mpi_module, only: mpi_bundle,is_root,linear_mode,sum_integers,sum_doubles,scatter_points,gather_points + use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points #else use mpi_module, only: mpi_bundle,is_root,linear_mode #endif @@ -106,7 +106,6 @@ function prior(cube) result(theta) #endif real(dp), dimension(settings%nTotal) :: live_point ! Temporary live point array - real(dp), dimension(settings%nTotal*mpi_information%nprocs) :: live_points ! Temporary live point array for generation character(len=fmt_len) :: fmt_dbl ! writing format variable @@ -117,7 +116,6 @@ function prior(cube) result(theta) real(dp) :: time0,time1,total_time real(dp),dimension(size(settings%grade_dims)) :: speed - integer :: live_point_index ! Start index of the live point in the live_points array. ! Initialise number of likelihood calls to zero here nlike = 0 @@ -187,61 +185,64 @@ function prior(cube) result(theta) else !===================== PARALLEL MODE ======================= - do while(.true.) - if(is_root(mpi_information)) then - ! root generates random numbers and scatters them to the workers - if (RTI%nlive(1)0) - time0 = time() - call calculate_point( loglikelihood, prior, live_point, settings,nlike) ! 
Compute physical coordinates, likelihoods and derived parameters - ndiscarded=ndiscarded+1 - time1 = time() - live_point(settings%b0) = settings%logzero - if(live_point(settings%l0)>settings%logzero) total_time = total_time + time1-time0 + ! Recieve a point from any worker + worker_id = catch_point(live_point,mpi_information) + ! If its valid, add it to the array + if(live_point(settings%l0)>settings%logzero) then - call gather_points(live_points,live_point,mpi_information) - ! Recieve a point from any worker + call add_point(live_point,RTI%live,RTI%nlive,1) ! Add this point to the array - if (is_root(mpi_information)) then - do live_point_index=1, mpi_information%nprocs * settings%nTotal, settings%nTotal - if (RTI%nlive(1)>=nprior) exit ! exit loop if enough points have been generated + !-------------------------------------------------------------------------------! + call write_generating_live_points(settings%feedback,RTI%nlive(1),nprior) + !-------------------------------------------------------------------------------! - live_point=live_points(live_point_index:live_point_index+settings%nTotal-1) + if(settings%write_live) then + ! Write the live points to the live_points file + write(write_phys_unit,fmt_dbl) live_point(settings%p0:settings%d1), live_point(settings%l0) + flush(write_phys_unit) ! flush the unit to force write + end if - ! If its valid, add it to the array - if(live_point(settings%l0)>settings%logzero) then + end if - call add_point(live_point,RTI%live,RTI%nlive,1) ! Add this point to the array - !-------------------------------------------------------------------------------! - call write_generating_live_points(settings%feedback,RTI%nlive(1),nprior) - !-------------------------------------------------------------------------------! + if(RTI%nlive(1)settings%logzero) total_time = total_time + time1-time0 + call throw_point(live_point,mpi_information) ! Send it to the root node + if(.not. more_points_needed(mpi_information)) exit ! If we've recieved a kill signal, then exit this loop + + end do + end if #endif end if !(nprocs case) @@ -606,7 +607,6 @@ function prior(cube) result(theta) ! The workers simply generate and send points until they're told to stop by the administrator live_point = settings%seed_point - live_point = 0 do while(.true.) do i_repeat = 1,settings%nprior_repeat do i_dim=1,settings%nDims diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index 2100e3ad..9a891b2a 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -368,69 +368,6 @@ end subroutine throw_point - !============== Scattering/gathering live points ==================== - ! This a process by which the administrator 'scatters' live points - ! to all workers, and gathers them back again. - ! - ! This is used in the initial generation of live points. - ! scatter_points: - ! root ----> all workers - ! - ! gather_points: - ! all workers ----> root - - !> Administrator scatters live points to all workers. - !! - !! This a process by which a worker node 'throws' a point to the root - - subroutine scatter_points(live_points,live_point,mpi_information) - implicit none - - real(dp),intent(in),dimension(:) :: live_points !> live points to throw - real(dp),intent(out),dimension(:) :: live_point !> live point to catch - type(mpi_bundle), intent(in) :: mpi_information - - call MPI_SCATTER( &! - live_points, &! - size(live_point), &! - MPI_DOUBLE_PRECISION, &! - live_point, &! - size(live_point), &! - MPI_DOUBLE_PRECISION, &! - mpi_information%root, &! 
- mpi_information%communicator,&! - mpierror &! - ) - - end subroutine scatter_points - - - !> Administrator gathers live points from all workers. - !! - !! This a process by which the administrator node 'gathers' - !! all points to the root. - - - subroutine gather_points(live_points,live_point,mpi_information) - implicit none - - real(dp),intent(in),dimension(:) :: live_point !> live point to throw - real(dp),intent(out),dimension(:) :: live_points !> live points to catch - type(mpi_bundle), intent(in) :: mpi_information - - call MPI_GATHER( &! - live_point, &! - size(live_point), &! - MPI_DOUBLE_PRECISION, &! - live_points, &! - size(live_point), &! - MPI_DOUBLE_PRECISION, &! - mpi_information%root, &! - mpi_information%communicator,&! - mpierror &! - ) - - end subroutine gather_points From 48138c8647557b8407981c7b148549b046b49795 Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Wed, 22 Nov 2023 15:25:19 +0000 Subject: [PATCH 12/16] update comments --- src/polychord/mpi_utils.F90 | 21 ++++++++------------- 1 file changed, 8 insertions(+), 13 deletions(-) diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index 5a7d1bad..4e6632ab 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -693,20 +693,15 @@ function more_points_needed(mpi_information) end function more_points_needed - - !============== New messaging routines =========================== - ! Fix this later! - ! During initial live point generation, the administrator needs to signal to the workers - ! whether or not to keep generating live points, or whether to stop - ! - ! administrator ----> worker - ! request_point more_points_needed -> true - ! no_more_points more_points_needed -> false + !> Request specific live points + ! administrator ----> worker + ! request_this_point(live_point) point_needed -> true + ! no_more_points (defined above) point_needed -> false ! - !> Request point + !> Request this point !! - !! This subroutine is used by the root node to request a new live point + !! This subroutine is used by the root node to request a specific live point subroutine request_this_point(live_point,mpi_information,worker_id) implicit none type(mpi_bundle), intent(in) :: mpi_information @@ -726,9 +721,9 @@ subroutine request_this_point(live_point,mpi_information,worker_id) end subroutine request_this_point - !> See if more points are needed + !> See if another point is needed !! - !! This subroutine is used by the root node to request a new live point + !! 
This subroutine is used by the root node to request a specific live point function point_needed(live_point,mpi_information) use abort_module implicit none From ebc1aea0645211fc30491b11c718ccdc40aa30d7 Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Wed, 22 Nov 2023 20:44:31 +0000 Subject: [PATCH 13/16] version bump --- README.rst | 2 +- pypolychord/__init__.py | 2 +- src/polychord/feedback.f90 | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/README.rst b/README.rst index 0c154185..156ca1c2 100644 --- a/README.rst +++ b/README.rst @@ -5,7 +5,7 @@ :target: https://arxiv.org/abs/1506.00171 :alt: Open-access paper -PolyChord v 1.20.2 +PolyChord v 1.21.0 Will Handley, Mike Hobson & Anthony Lasenby diff --git a/pypolychord/__init__.py b/pypolychord/__init__.py index 16f2deef..7c6e3ddb 100644 --- a/pypolychord/__init__.py +++ b/pypolychord/__init__.py @@ -1,3 +1,3 @@ -__version__ = "1.20.2" +__version__ = "1.21.0" from pypolychord.settings import PolyChordSettings from pypolychord.polychord import run_polychord diff --git a/src/polychord/feedback.f90 b/src/polychord/feedback.f90 index 42d4fc0c..8c8e4596 100644 --- a/src/polychord/feedback.f90 +++ b/src/polychord/feedback.f90 @@ -28,7 +28,7 @@ subroutine write_opening_statement(settings) write(stdout_unit,'("")') write(stdout_unit,'("PolyChord: Next Generation Nested Sampling")') write(stdout_unit,'("copyright: Will Handley, Mike Hobson & Anthony Lasenby")') - write(stdout_unit,'(" version: 1.20.2")') + write(stdout_unit,'(" version: 1.21.0")') write(stdout_unit,'(" release: 1st June 2021")') write(stdout_unit,'(" email: wh260@mrao.cam.ac.uk")') write(stdout_unit,'("")') From 6db50747dd7ae5a2f143309eb7b504dc8423bb16 Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Fri, 24 Nov 2023 16:19:21 +0000 Subject: [PATCH 14/16] record order that random samples were generated in so that their order can be restored at the end --- src/polychord/generate.F90 | 17 ++++++++++++++--- 1 file changed, 14 insertions(+), 3 deletions(-) diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 2c83dc63..029e82e9 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -61,7 +61,7 @@ end function GenerateSeed subroutine GenerateLivePoints(loglikelihood,prior,settings,RTI,mpi_information) use settings_module, only: program_settings use random_module, only: random_reals - use utils_module, only: write_phys_unit,DB_FMT,fmt_len,minpos,time + use utils_module, only: write_phys_unit,DB_FMT,fmt_len,minpos,time,sort_doubles use calculate_module, only: calculate_point use read_write_module, only: phys_live_file, prior_info_file use feedback_module, only: write_started_generating,write_finished_generating,write_generating_live_points @@ -111,7 +111,8 @@ function prior(cube) result(theta) character(len=fmt_len) :: fmt_dbl ! writing format variable integer :: nlike ! number of likelihood calls - integer :: nprior, ndiscarded + integer :: nprior, ndiscarded, + integer :: ngenerated ! use to track order points are generated in real(dp) :: time0,time1,total_time real(dp),dimension(size(settings%grade_dims)) :: speed @@ -119,6 +120,7 @@ function prior(cube) result(theta) ! Initialise number of likelihood calls to zero here nlike = 0 + ngenerated = 1 if(is_root(mpi_information)) then @@ -193,6 +195,9 @@ function prior(cube) result(theta) do worker_id=1,active_workers ! Request a point from any worker live_point(settings%h0:settings%h1) = random_reals(settings%nDims) ! Generate a random hypercube coordinate + ! 
use the time as an ordering identifier, cheat by using the birth contour + live_point(settings%b0) = ngenerated + ngenerated = ngenerated+1 call request_this_point(live_point,mpi_information,worker_id) end do @@ -221,6 +226,9 @@ function prior(cube) result(theta) if(RTI%nlive(1)settings%logzero) total_time = total_time + time1-time0 call throw_point(live_point,mpi_information) ! Send it to the root node From 293c016eca7e9ada5132cc19bf67f2d227d2267a Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Fri, 24 Nov 2023 16:23:07 +0000 Subject: [PATCH 15/16] errant comma --- src/polychord/generate.F90 | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 029e82e9..5900f988 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -111,7 +111,7 @@ function prior(cube) result(theta) character(len=fmt_len) :: fmt_dbl ! writing format variable integer :: nlike ! number of likelihood calls - integer :: nprior, ndiscarded, + integer :: nprior, ndiscarded integer :: ngenerated ! use to track order points are generated in real(dp) :: time0,time1,total_time From 494b62c222e90e44730d12b78d3a38ded3f5dc3c Mon Sep 17 00:00:00 2001 From: AdamOrmondroyd Date: Tue, 9 Jan 2024 12:00:50 +0000 Subject: [PATCH 16/16] rename request_this_point -> request_live_point and point_needed -> live_point_needed + tidy comments --- src/polychord/generate.F90 | 8 ++++---- src/polychord/mpi_utils.F90 | 36 ++++++++++++++++++------------------ 2 files changed, 22 insertions(+), 22 deletions(-) diff --git a/src/polychord/generate.F90 b/src/polychord/generate.F90 index 5900f988..3bc0decc 100644 --- a/src/polychord/generate.F90 +++ b/src/polychord/generate.F90 @@ -69,7 +69,7 @@ subroutine GenerateLivePoints(loglikelihood,prior,settings,RTI,mpi_information) use array_module, only: add_point use abort_module #ifdef MPI - use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points,request_this_point,point_needed + use mpi_module, only: mpi_bundle,is_root,linear_mode,throw_point,catch_point,more_points_needed,sum_integers,sum_doubles,request_point,no_more_points,request_live_point,live_point_needed #else use mpi_module, only: mpi_bundle,is_root,linear_mode #endif @@ -198,7 +198,7 @@ function prior(cube) result(theta) ! use the time as an ordering identifier, cheat by using the birth contour live_point(settings%b0) = ngenerated ngenerated = ngenerated+1 - call request_this_point(live_point,mpi_information,worker_id) + call request_live_point(live_point,mpi_information,worker_id) end do do while(active_workers>0) @@ -229,7 +229,7 @@ function prior(cube) result(theta) ! use the time as a unique identifier, cheat by using the birth contour live_point(settings%b0) = ngenerated ngenerated = ngenerated+1 - call request_this_point(live_point,mpi_information,worker_id) + call request_live_point(live_point,mpi_information,worker_id) else call no_more_points(mpi_information,worker_id) ! Otherwise, send a signal to stop active_workers=active_workers-1 ! decrease the active worker counter @@ -248,7 +248,7 @@ function prior(cube) result(theta) ! The workers simply generate and send points until they're told to stop by the administrator - do while(point_needed(live_point,mpi_information)) + do while(live_point_needed(live_point,mpi_information)) time0 = time() call calculate_point( loglikelihood, prior, live_point, settings,nlike) ! 
Compute physical coordinates, likelihoods and derived parameters ndiscarded=ndiscarded+1 diff --git a/src/polychord/mpi_utils.F90 b/src/polychord/mpi_utils.F90 index 4e6632ab..03d7315e 100644 --- a/src/polychord/mpi_utils.F90 +++ b/src/polychord/mpi_utils.F90 @@ -695,22 +695,22 @@ end function more_points_needed !> Request specific live points ! administrator ----> worker - ! request_this_point(live_point) point_needed -> true + ! request_live_point(live_point) point_needed -> true ! no_more_points (defined above) point_needed -> false ! - !> Request this point + !> Request live point !! !! This subroutine is used by the root node to request a specific live point - subroutine request_this_point(live_point,mpi_information,worker_id) + subroutine request_live_point(live_point,mpi_information,worker_id) implicit none type(mpi_bundle), intent(in) :: mpi_information integer, intent(in) :: worker_id !> Worker to request a new point from real(dp), intent(in), dimension(:) :: live_point !> The live point to be sent - call MPI_SEND( & - live_point, &! not sending anything + call MPI_SEND( &! + live_point, &! live point being sent size(live_point), &! MPI_DOUBLE_PRECISION, &! sending doubles worker_id, &! process id to send to @@ -719,12 +719,12 @@ subroutine request_this_point(live_point,mpi_information,worker_id) mpierror &! error flag ) - end subroutine request_this_point + end subroutine request_live_point !> See if another point is needed !! !! This subroutine is used by the root node to request a specific live point - function point_needed(live_point,mpi_information) + function live_point_needed(live_point,mpi_information) use abort_module implicit none type(mpi_bundle), intent(in) :: mpi_information @@ -732,29 +732,29 @@ function point_needed(live_point,mpi_information) integer, dimension(MPI_STATUS_SIZE) :: mpistatus ! status identifier - logical :: point_needed !> Whether we need more points or not + logical :: live_point_needed !> Whether we need more points or not call MPI_RECV( &! - live_point, &! + live_point, &! live point recieved size(live_point), &! - MPI_DOUBLE_PRECISION, &! - mpi_information%root, &! - MPI_ANY_TAG, &! - mpi_information%communicator,&! - mpistatus, &! - mpierror &! + MPI_DOUBLE_PRECISION, &! receiving doubles + mpi_information%root, &! root node + MPI_ANY_TAG, &! mpi tag + mpi_information%communicator,&! mpi handle + mpistatus, &! status identifier + mpierror &! error flag ) ! If we've recieved a kill signal, then exit this loop if(mpistatus(MPI_TAG) == tag_gen_stop ) then - point_needed = .false. + live_point_needed = .false. else if(mpistatus(MPI_TAG) == tag_gen_request) then - point_needed = .true. + live_point_needed = .true. else call halt_program('generate error: unrecognised tag') end if - end function point_needed + end function live_point_needed #endif
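
Taken together, the state this series ends on is: for the initial live points, the root process is the only one that draws random numbers; it stamps each candidate hypercube point with a generation counter (stored in the settings%b0 "birth contour" slot) before handing it to a worker with request_live_point; workers loop on live_point_needed, evaluate the point with calculate_point, and throw_point it back; and because workers return points in a timing-dependent order, the recorded counter lets the original generation order be restored afterwards (the sort_doubles import and the patch 14 commit message point at this; the sort itself lies outside the hunks shown). Below is a minimal illustrative sketch of that protocol in Python with mpi4py, not PolyChord's actual Fortran implementation: evaluate(), N_PRIOR, N_DIMS and the TAG_* constants are stand-ins for calculate_point, nprior, nDims, tag_gen_request and tag_gen_stop, and the index carried in each message plays the role of the b0 stamp.

# Illustrative sketch only: run with e.g. `mpirun -n 4 python sketch.py` (needs >= 2 ranks).
import numpy as np
from mpi4py import MPI

comm = MPI.COMM_WORLD
rank, nprocs = comm.Get_rank(), comm.Get_size()

TAG_REQUEST, TAG_STOP = 1, 2          # stand-ins for tag_gen_request / tag_gen_stop
N_PRIOR, N_DIMS = 20, 4               # illustrative sizes

def evaluate(cube):
    """Stand-in for calculate_point: hypercube coordinates -> loglikelihood."""
    return -float(np.sum((cube - 0.5) ** 2))

if rank == 0:
    rng = np.random.default_rng(1)    # only the root consumes random numbers
    ngenerated = 0
    accepted = []                     # (generation index, cube, loglike)

    def dispatch(dest):               # analogue of request_live_point
        global ngenerated
        comm.send((ngenerated, rng.random(N_DIMS)), dest=dest, tag=TAG_REQUEST)
        ngenerated += 1

    for worker in range(1, nprocs):   # prime every worker once
        dispatch(worker)

    active, status = nprocs - 1, MPI.Status()
    while active > 0:
        index, cube, loglike = comm.recv(source=MPI.ANY_SOURCE, status=status)
        worker = status.Get_source()
        if np.isfinite(loglike):                        # analogue of logL > logzero
            accepted.append((index, cube, loglike))
        if len(accepted) < N_PRIOR:
            dispatch(worker)                            # request another specific point
        else:
            comm.send(None, dest=worker, tag=TAG_STOP)  # analogue of no_more_points
            active -= 1

    accepted.sort(key=lambda t: t[0])  # restore generation order, independent of worker timing
    print("initial live points:", len(accepted))
else:
    status = MPI.Status()
    while True:                        # analogue of the live_point_needed loop
        msg = comm.recv(source=0, status=status)
        if status.Get_tag() == TAG_STOP:
            break
        index, cube = msg
        comm.send((index, cube, evaluate(cube)), dest=0)  # analogue of throw_point

The reason for stamping ngenerated into each point before dispatch, and sorting at the end, is that likelihood evaluations finish in an order that depends on worker speed: without the stamp the set of accepted points would be reproducible for a fixed seed, but their ordering would not, so downstream results would still vary from run to run and with the number of MPI processes.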