Skip to content

Commit

Permalink
Compound flooding scripts: added a script to remove manual steps in x…
Browse files Browse the repository at this point in the history
…GEOID conversion; fixed a bug in pload* introduced earlier; added shebang to a few scripts.
  • Loading branch information
feiye-vims committed Jan 6, 2024
1 parent 49e702e commit 9b5c073
Show file tree
Hide file tree
Showing 7 changed files with 90 additions and 11 deletions.
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
#!/usr/bin/env python3

import geopandas as gpd
import numpy as np
from pathlib import Path
Expand Down
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
#!/usr/bin/env python
#!/usr/bin/env python3

import numpy as np
from schism_py_pre_post import Datafiles
Expand Down
2 changes: 2 additions & 0 deletions src/Utility/Pre-Processing/SECOFS/Bathy_edit/NCF/load_NCF.py
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
#!/usr/bin/env python3

import geopandas as gpd
import numpy as np
from pathlib import Path
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,77 @@
import numpy as np
import subprocess
import os
from glob import glob
from pathlib import Path
import time

def file_check(input_fname, result_fname):
    '''
    Check whether the result file is complete, i.e., whether every line of the
    input file (keyed by its first column) appears in the result file.

    If some lines were not processed, the corresponding original input lines are
    written verbatim to a file under a sibling "<input_parent>_failed" folder.

    :param input_fname: path to the original input file (whitespace-separated numbers,
                        first column is a unique point id)
    :param result_fname: path to the result file produced by vdatum.jar
    :return: path (str) of the file holding the failed lines, or None if complete
    '''
    input_fname = Path(input_fname)
    result_fname = Path(result_fname)

    # ndmin=2 keeps a single-line file as a 2-D array so column indexing works;
    # without it, a one-line input made arr[:, 0] raise an IndexError
    arr1 = np.loadtxt(input_fname, ndmin=2)

    if result_fname.exists():
        arr2 = np.loadtxt(result_fname, ndmin=2)
    else:
        # the result file may be missing entirely if the conversion process died;
        # treat every input line as failed rather than crashing
        arr2 = np.empty((0, arr1.shape[1]))

    if arr2.size == 0:
        # empty or missing result: all input lines failed
        failed_indices = np.arange(arr1.shape[0])
    else:
        # input lines whose id (first column) is absent from the result
        failed_indices = np.where(~np.isin(arr1[:, 0], arr2[:, 0]))[0]

    if len(failed_indices) == 0:
        return None

    # save the failed portion of the original input file to a sibling
    # "*_failed" folder for later inspection / retry
    failed_folder = f'{input_fname.parent}_failed'
    os.makedirs(failed_folder, exist_ok=True)  # no race between exists() and mkdir()
    failed_file = f'{failed_folder}/{input_fname.name}'

    # copy the failed lines verbatim from the original input file
    with open(input_fname, 'r') as file:
        lines = file.readlines()
    with open(failed_file, 'w') as file:
        file.writelines(lines[index] for index in failed_indices)

    return failed_file

if __name__ == "__main__":
    # ---------- inputs ----------
    # working folder containing vdatum.jar, the per-region input folders
    # ("region4", "region5", ...) and the "result" output folder
    vdatum_folder = "/sciclone/schism10/feiye/SECOFS/Inputs/I03a/Bathy_edit/xGEOID/vdatum/"
    regions = [4, 5]  # must be in ascending order; failed points fall through to the next region
    # --------------------

    # clear the result folder
    os.system(f"rm -rf {vdatum_folder}/result/*")

    # vdatum.jar needs to be in the same folder, and it treats all upper case
    # letters as lower case in the input file name
    os.chdir(vdatum_folder)
    for region_id in regions:
        input_fnames = glob(f"region{region_id}/*.txt")

        # launch one vdatum conversion per input file, all in parallel
        processes = [subprocess.Popen(f"java -jar vdatum.jar ihorz:NAD83_2011 ivert:navd88:m:height ohorz:igs14:geo:deg overt:xgeoid20b:m:height -file:txt:space,1,2,3,skip0:{fname}:result region:{region_id}", shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL) for fname in input_fnames]

        # wait for all conversions to complete
        for k, process in enumerate(processes):
            exit_code = process.wait()
            if exit_code == 0:
                print(f"Process {k+1} completed successfully")
            else:
                print(f"Process {k+1} failed with exit code {exit_code}")

        # check each output file for completeness; points vdatum could not
        # convert in this region are retried with the next region's datum group
        for input_fname in input_fnames:
            failed_file = file_check(input_fname, f'{vdatum_folder}/result/{Path(input_fname).name}')
            if failed_file is not None:
                if region_id == regions[-1]:
                    raise Exception(f"No other groups to try: Failed to convert {failed_file}")
                else:
                    print(f"Failed to convert {failed_file} in region{region_id}, trying the next region")
                    # copy the failed subset of the original input file into the
                    # next region's input folder so it is picked up next iteration
                    os.system(f"cp {failed_file} region{region_id+1}/")

Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
#!/usr/bin/env python3

'''
This script generates a polygon shapefile covering the spatial domain of hgrid.*,
with each polygon showing the corresponding source DEM.
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -139,14 +139,13 @@
while(True):
try:
dpi,sindi=load_bathymetry(gd.x,gd.y,'{}/{}'.format(sdir,fname),fmt=1)
# reverse depth sign
if reverse_sign==1:
dpi=-dpi
break
except:
time.sleep(15)

# reverse depth sign
if reverse_sign==1:
dpi=-dpi

#save results
S.dp[bname]=dpi; S.sind[bname]=sindi
print('finished reading {},: {}, myrank={}'.format(fname,inum[m],myrank)); sys.stdout.flush()
Expand All @@ -172,10 +171,6 @@
gd.dp[sind]=dp; did[sind]=i+1
dnamei=[k for k in fnames0 if k.startswith(fname)][0]; dname.append(dnamei)

#reverse depth sign
if reverse_sign==1:
gd.dp=-gd.dp

#applying minimum depth
if regions is not None:
for i, region in enumerate(regions):
Expand Down
Original file line number Diff line number Diff line change
@@ -1,3 +1,5 @@
#!/usr/bin/env python3

from pylib_essentials.schism_file import read_schism_hgrid_cached, grd2sms, schism_grid
from pathlib import Path
import geopandas as gpd
Expand All @@ -20,7 +22,7 @@
# determine in-channel nodes
hg_points = gpd.GeoDataFrame(geometry=gpd.points_from_xy(hg.x, hg.y), crs='epsg:4326')
joined_gdf = gpd.sjoin(hg_points, river_polys, how="inner", op='within')
idx = joined_gdf.index.to_numpy()
idx = joined_gdf.index.to_numpy()

in_channel = np.zeros_like(hg.dp, dtype=bool)
in_channel[idx] = True
Expand All @@ -32,4 +34,4 @@
grd2sms(hg, f'{wdir}/{hg_file.stem}_dredged_{dredge_depth}m.2dm')
hg.save(f'{wdir}/{hg_file.stem}_dredged_{dredge_depth}m.gr3', fmt=1)

pass
pass

0 comments on commit 9b5c073

Please sign in to comment.