Skip to content

Commit

Permalink
Merge branch 'testing' into master
Browse files Browse the repository at this point in the history
  • Loading branch information
P-Mousley authored Jan 10, 2025
2 parents 26d01e0 + 9b6803f commit 7699494
Show file tree
Hide file tree
Showing 10 changed files with 336 additions and 67 deletions.
63 changes: 63 additions & 0 deletions CLI/example_islatu.yaml
Original file line number Diff line number Diff line change
@@ -0,0 +1,63 @@
instrument: 'i07'
visit:
local contact: "Firstname Lastname"
user: 'Firstname Lastname'
user affiliation: 'InstitutionName'
visit id: 'experimentID'
date: 2021-08-06
setup:
# ====(sample_length, sample_width) in m
# ====...where the "length" direction is parallel to the wavevector of the
# ====incident light for |q|=0.
sample size: (200e-3, 10e-3)

# ====Beam FWHM in m
beam width: 100e-6

#==== /path/to/normalization/file comment this line out if not using dcd normalisation
#=== Outside of diamond, this might look like, for example:
# ====/Users/richardbrearton/Documents/Data/si28707-1/404863.dat
dcd normalisation: /dls/i07/data/2021/si28707-1/404863.dat



#name of q_axis e.g. 'qdcd' - used in interpolator for corrections, defaults to "qdcd_"
# q axis:

#set whether the image is transposed or not; if transposed, qz and qxy will be swapped.
# transpose: True

# unclear where this is used by islatu
# pixel max:

# unclear where this is used by islatu
# hot pixel max:

#name of theta axis e.g. 'dcdtheta' - unclear where this is used by islatu
# theta_axis:


crop:
method: crop
# Leave kwargs commented to crop to ROI_1, as specified in GDA.
# Uncomment kwargs to crop to manually set the cropping region.
# kwargs: {'x_start': 1050, 'x_end': 1150, 'y_start': 190, 'y_end': 211}
background:
# The most reliable method that one can use to subtract background is
# roi_subtraction. We strongly recommend that this option is used.
method: roi_subtraction
# Uncomment kwargs to background to manually set the background region.
# kwargs: {'x_start': 1050, 'x_end': 1150, 'y_start': 190, 'y_end': 211}

#set what columns you want in the output, currently only working option is 3
# 3 = 'Qz / Aa^-1', 'RQz', 'sigma RQz, standard deviation'
output_columns: 3
rebin:
# Number of bins to place q-vectors into. These bins are linearly spaced in q
# by default.
n qvectors: 5000
#alternative definition of rebinning parameters, although does not seem to be fully implemented
# min:
# max:
# step:
# shape:
8 changes: 8 additions & 0 deletions CLI/islatuscript_template.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
#!/bin/bash
# SLURM batch-script template for an islatu XRR reduction job.
# The save_path placeholder on the final line is filled in by
# CLI/process_xrr.py with the path of the generated python job script
# before this file is submitted via sbatch.
#SBATCH --partition cs05r
#SBATCH --nodes=1
#SBATCH --cpus-per-task=20
#SBATCH --mem-per-cpu=2000
#SBATCH --job-name=islatu

/dls_sw/apps/islatu/testing/conda_env/bin/python ${save_path}
113 changes: 104 additions & 9 deletions CLI/process_xrr.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,10 @@

import argparse
import os

from pathlib import Path
import subprocess
import time
import re

if __name__ == "__main__":
# First deal with the parsing of the command line arguments using the
Expand Down Expand Up @@ -133,13 +136,6 @@
# Set islatu's logger to requested verbosity.
debug.logging_lvl = args.verbose

# Now it's time to prepare to do some XRR reduction. If the user is in
# diamond and wants to use a cluster, then we should go ahead and do that.
if args.cluster:
raise NotImplementedError(
"Islatu currently only runs locally. If cluster submission is " +
"necessary, please contact richard.brearton@diamond.ac.uk"
)

# If execution reaches here, we're processing the scan locally. First look
# for the .yaml file if we weren't explicitly told where it is.
Expand Down Expand Up @@ -264,5 +260,104 @@
# If execution reaches here, we found the .yaml file and we have the scan
# numbers we'll construct the XRR curve from. This is all that we need: a
# recipe and some data; let's go ahead and process the data on this machine.
i07reduce(args.scan_numbers, args.yaml_path, args.data_path,
# Now it's time to prepare to do some XRR reduction. If the user is in
# diamond and wants to use a cluster, then we should go ahead and do that.
if args.cluster:
islatufolder=f'{Path.home()}/islatu'
if not os.path.exists(islatufolder):
os.makedirs(islatufolder)
i=1
save_path=f'{islatufolder}/jobscript_{i}.py'
while (os.path.exists(str(save_path))):
i += 1
save_file_name = f'{islatufolder}/testscript_{i}.py'
save_path = Path(save_file_name)
if i > 1e7:
raise ValueError(
"naming counter hit limit therefore exiting ")
f=open(save_path,'x')
f.write("from islatu.runner import i07reduce\n")
f.write(f"scans = {args.scan_numbers}\nyamlpath='{args.yaml_path}'\ndatapath='{args.data_path}'\noutfile='{args.output}'\nqsubdict={args.limit_q}\n")
f.write("i07reduce(scans, yamlpath, datapath,filename=outfile, q_subsample_dicts=qsubdict)")
#f.write(f"i07reduce({args.scan_numbers}, {args.yaml_path}, {args.data_path},\
# filename={args.output}, q_subsample_dicts={args.limit_q})")
f.close()

#load in template mapscript, new paths
f=open('/dls_sw/apps/islatu/testing/islatu/CLI/islatuscript_template.sh')
lines=f.readlines()
f.close()
jobfile=f'{islatufolder}//jobscript.sh'
if os.path.exists(jobfile):
f=open(jobfile,'w')
else:
f=open(jobfile,'x')
for line in lines:
if '$' in line:
phrase=line[line.find('$'):line.find('}')+1]
outphrase=phrase.strip('$').strip('{').strip('}')
outline=line.replace(phrase,str(locals()[f'{outphrase}']))
#print(outline)
f.write(outline)
else:
f.write(line)
f.close()

#get list of slurm out files in home directory
startfiles=os.listdir(f'{Path.home()}/islatu')
startslurms=[x for x in startfiles if '.out' in x]
startslurms.append(startfiles[0])
startslurms.sort(key=lambda x: os.path.getmtime(f'{Path.home()}/islatu/{x}'))

#get latest slurm file before submitting job
endfiles=os.listdir(f'{Path.home()}/islatu')
endslurms=[x for x in endfiles if '.out' in x]
endslurms.append(endfiles[0])
endslurms.sort(key=lambda x: os.path.getmtime(f'{Path.home()}/islatu/{x}'))
count=0
limit=0

#call subprocess to submit job using wilson
subprocess.run(["ssh","wilson","cd islatu \nsbatch jobscript.sh"])
while endslurms[-1]==startslurms[-1]:
endfiles=os.listdir(f'{Path.home()}/islatu')
endslurms=[x for x in endfiles if '.out' in x]
endslurms.append(endfiles[0])
endslurms.sort(key=lambda x: os.path.getmtime(f'{Path.home()}/islatu/{x}'))
if count >50:
limit=1
break
print(f'Job submitted, waiting for SLURM output. Timer={5*count}',end="\r")
time.sleep(5)
count+=1
if limit==1:
print('Timer limit reached before new slurm ouput file found')
else:
print(f'Slurm output file: {Path.home()}/islatu//{endslurms[-1]}\n')
breakerline='*'*35
monitoring_line=f"\n{breakerline}\n ***STARTING TO MONITOR TAIL END OF FILE, TO EXIT THIS VIEW PRESS ANY LETTER FOLLOWED BY ENTER**** \n{breakerline} \n"
print(monitoring_line)
process = subprocess.Popen(["tail","-f",f"{Path.home()}/islatu//{endslurms[-1]}"], stdout=subprocess.PIPE, text=True)
target_phrase="Reduced data stored"
try:
for line in process.stdout:
if "Loading images" in line:
print(line.strip(),'\n')
elif"Currently loaded" in line:
print(f"\r{line.strip()}", end='')
else:
print(line.strip()) # Print each line of output
if re.search(target_phrase, line):
print(f"Target phrase '{target_phrase}' found. Closing tail.")
break
if( "Errno" in line) or ("error" in line) or ("Error" in line):
print("error found. closing tail")
break
finally:
process.terminate()
process.wait()
print("Python script on cluster completed successfully")

else:
i07reduce(args.scan_numbers, args.yaml_path, args.data_path,
filename=args.output, q_subsample_dicts=args.limit_q)
2 changes: 1 addition & 1 deletion docs/source/index.rst
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ If you need to contact the developers about anything, please either `raise an is

Contributors
------------
- Philip Mousley
- `Philip Mousley`
- `Richard Brearton`_
- `Andrew R. McCluskey`_

Expand Down
8 changes: 4 additions & 4 deletions src/islatu/background.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,8 +14,8 @@
from scipy.stats import norm
from scipy.optimize import curve_fit

from .region import Region
from .image import Image
from islatu.region import Region
from islatu.image import Image


@dataclass
Expand Down Expand Up @@ -70,8 +70,8 @@ def roi_subtraction(image, list_of_regions: List[Region]):
# the intensity measured in all the background regions so far.
sum_of_bkg_areas += np.sum(
image.array_original[
int(region.x_start):int(region.x_end),
int(region.y_start):int(region.y_end)
int(region.y_start):int(region.y_end),
int(region.x_start):int(region.x_end)
]
)
# Add the number of pixels in this background ROI to the total number of
Expand Down
2 changes: 1 addition & 1 deletion src/islatu/cropping.py
Original file line number Diff line number Diff line change
Expand Up @@ -19,4 +19,4 @@ def crop_to_region(array: np.ndarray, region: Region):
region:
The instance of Region to crop to.
"""
return array[region.x_start:region.x_end, region.y_start:region.y_end]
return array[region.y_start:region.y_end,region.x_start:region.x_end]
Loading

0 comments on commit 7699494

Please sign in to comment.