# Enable inline matplotlib rendering for this notebook session.
%matplotlib inline
import pandas as pd
import socket
# Fully-qualified host name; passed to load.set_control(host) below to
# configure the dask cluster for the machine this notebook runs on.
host = socket.getfqdn()
from core import load, zoom, calc, save,plots,monitor
# Reload the helper modules so edits to ./core/*.py take effect
# without restarting the kernel.
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
If you submit the job with a job scheduler, below is a list of environment variables one can pass.
local : if True, run a dask local cluster; if not True, use the number of workers set in 'local'. If no 'local' is given, local defaults to 'True'.
%env ychunk='2', #%env tchunk='2'
controls chunking. 'False' keeps the original netcdf file's chunking unmodified.
ychunk=10 will group the original netcdf files 10 by 10.
tchunk=1 will chunk the time coordinate one by one.
%env file_exp=
'file_exp': Which 'experiment' name is it? This corresponds to the intake catalog name without the path and .yaml extension.
#%env year=
For Validation, this corresponds to the year in path/year/month used for monitoring. It acts as a 'date' pattern: setting it to 0[0-9] & 1[0-9] & [2-3][0-9] means processing all files in the monitoring directory, so the job can be separated into three lots. For the DELTA experiment, 'year' really corresponds to the year.
%env month=
For monitoring, this corresponds to the file path path-XIOS.{month}/
For the DELTA experiment, 'month' really corresponds to the month.
Proceed with saving? True or False. Default is set to True.
Proceed with plotting? True or False. Default is set to True.
Proceed with computation, or just load the computed result? True or False. Default is set to True.
save output file used for plotting
using a kerchunked file -> False, not using kerchunk -> True
Name of the control file to be used for computation/plots/save. We have a number of M_xxx.csv files:
Monitor.sh calls M_MLD_2D
and AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh, Integrals.sh , Sections.sh
M_AWTMD
M_Fluxnet
M_Ice_quantities
M_IceClim M_IceConce M_IceThick
M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
M_Mean_temp_velo M_Mooring
M_Sectionx M_Sectiony
%%time
# 'savefig': Do we save output in html? or not. keep it true.
savefig=True
# Read the run configuration from environment variables (local, file_exp,
# year, month, ...): returns the dask client/cluster, the control-file name,
# the intake catalog path, the period to process, and the report/output dirs.
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
# Shell escapes: make sure the output and dask-report directories exist.
!mkdir -p $outputpath
!mkdir -p $daskreport
# Display the dask client widget (dashboard link, worker summary).
client
local True using host= irene4298.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16 10000000000 rome local cluster starting This code is running on irene4298.c-irene.mg1.tgcc.ccc.cea.fr using SEDNA_DELTA_MONITOR file experiment, read from ../lib/SEDNA_DELTA_MONITOR.yaml on year= 2012 on month= 04 outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6469506irene4298.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_04M_FWC_SSH/ CPU times: user 628 ms, sys: 110 ms, total: 738 ms Wall time: 20 s
Client-befdd783-18ce-11ed-97d6-080038b932ef
Connection method: Cluster object | Cluster type: distributed.LocalCluster |
Dashboard: http://127.0.0.1:8787/status |
fe856785
Dashboard: http://127.0.0.1:8787/status | Workers: 16 |
Total threads: 128 | Total memory: 251.06 GiB |
Status: running | Using processes: True |
Scheduler-8e765043-38c8-45fe-a768-d46215132360
Comm: tcp://127.0.0.1:44099 | Workers: 16 |
Dashboard: http://127.0.0.1:8787/status | Total threads: 128 |
Started: Just now | Total memory: 251.06 GiB |
Comm: tcp://127.0.0.1:35348 | Total threads: 8 |
Dashboard: http://127.0.0.1:44760/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:37733 | |
Local directory: /tmp/dask-worker-space/worker-xxck6zfp |
Comm: tcp://127.0.0.1:33685 | Total threads: 8 |
Dashboard: http://127.0.0.1:46554/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:39586 | |
Local directory: /tmp/dask-worker-space/worker-umrj9a9b |
Comm: tcp://127.0.0.1:34183 | Total threads: 8 |
Dashboard: http://127.0.0.1:35587/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:39352 | |
Local directory: /tmp/dask-worker-space/worker-v3tsarm8 |
Comm: tcp://127.0.0.1:41533 | Total threads: 8 |
Dashboard: http://127.0.0.1:43823/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:34092 | |
Local directory: /tmp/dask-worker-space/worker-qzuye0ms |
Comm: tcp://127.0.0.1:37081 | Total threads: 8 |
Dashboard: http://127.0.0.1:42755/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:33015 | |
Local directory: /tmp/dask-worker-space/worker-2u1khrkl |
Comm: tcp://127.0.0.1:38312 | Total threads: 8 |
Dashboard: http://127.0.0.1:42505/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:45076 | |
Local directory: /tmp/dask-worker-space/worker-2q60ohzk |
Comm: tcp://127.0.0.1:41649 | Total threads: 8 |
Dashboard: http://127.0.0.1:41004/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:36857 | |
Local directory: /tmp/dask-worker-space/worker-xysxy5_2 |
Comm: tcp://127.0.0.1:46423 | Total threads: 8 |
Dashboard: http://127.0.0.1:37487/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:41692 | |
Local directory: /tmp/dask-worker-space/worker-v_g2xc_c |
Comm: tcp://127.0.0.1:37639 | Total threads: 8 |
Dashboard: http://127.0.0.1:38076/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:38554 | |
Local directory: /tmp/dask-worker-space/worker-dxxfo2_d |
Comm: tcp://127.0.0.1:43305 | Total threads: 8 |
Dashboard: http://127.0.0.1:35844/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:42889 | |
Local directory: /tmp/dask-worker-space/worker-ticjr7gb |
Comm: tcp://127.0.0.1:42187 | Total threads: 8 |
Dashboard: http://127.0.0.1:35693/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:43393 | |
Local directory: /tmp/dask-worker-space/worker-hkdp78wp |
Comm: tcp://127.0.0.1:45731 | Total threads: 8 |
Dashboard: http://127.0.0.1:35746/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:40013 | |
Local directory: /tmp/dask-worker-space/worker-ay4wvjin |
Comm: tcp://127.0.0.1:37553 | Total threads: 8 |
Dashboard: http://127.0.0.1:37547/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:35812 | |
Local directory: /tmp/dask-worker-space/worker-zri4mw3a |
Comm: tcp://127.0.0.1:44926 | Total threads: 8 |
Dashboard: http://127.0.0.1:36798/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:37366 | |
Local directory: /tmp/dask-worker-space/worker-u4_ir4ea |
Comm: tcp://127.0.0.1:41402 | Total threads: 8 |
Dashboard: http://127.0.0.1:34513/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:41638 | |
Local directory: /tmp/dask-worker-space/worker-xqfugrnx |
Comm: tcp://127.0.0.1:41917 | Total threads: 8 |
Dashboard: http://127.0.0.1:43745/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:44698 | |
Local directory: /tmp/dask-worker-space/worker-mu3mh1p8 |
# Load the control CSV (one row per computation: Value, Inputs, Equation,
# Zone, Plot, Colourmap, MinMax, Unit, ...).
df=load.controlfile(control)
#Take out 'later' tagged computations
# (optional filter, kept commented out as a manual toggle)
#df=df[~df['Value'].str.contains('later')]
# Display the control table.
df
Value | Inputs | Equation | Zone | Plot | Colourmap | MinMax | Unit | Oldname | Unnamed: 10 | |
---|---|---|---|---|---|---|---|---|---|---|
FWC_SSH | calc.FWC_SSH_load(data,nc_outputpath) | BBFG | FWC_SSH | None | None | m | S-1 |
Each computation consists of
%%time
import os
calcswitch=os.environ.get('calc', 'True')
lazy=os.environ.get('lazy','False' )
loaddata=((df.Inputs != '').any())
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata, 'lazy=',lazy)
data = load.datas(catalog_url,df.Inputs,month,year,daskreport,lazy=lazy) if ((calcswitch=='True' )*loaddata) else 0
data
calcswitch= True df.Inputs != nothing False lazy= False CPU times: user 343 µs, sys: 0 ns, total: 343 µs Wall time: 341 µs
0
%%time
# Run every computation listed in the control table `df`: for each row,
# evaluate its Equation, then save/plot according to the calc/save/plot
# environment switches.  `data` is the loaded input (or 0 when skipped);
# results and figures go under `outputpath`, dask reports under `daskreport`.
monitor.auto(df,data,savefig,daskreport,outputpath,file_exp='SEDNA'
)
#calc= True #save= False #plot= True Value='FWC_SSH' Zone='BBFG' Plot='FWC_SSH' cmap='None' clabel='m' clim= None outputpath='../results/SEDNA_DELTA_MONITOR/' nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/' filename='SEDNA_FWC_SSH_BBFG_FWC_SSH' #3 Start computing data= calc.FWC_SSH_load(data,nc_outputpath) monitor.optimize_dataset(data) start loading data filename= ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_maps_ALL_SSH_anomaly/t_*/y_*/x_*.nc dim ('x', 'y', 't')
--------------------------------------------------------------------------- ValueError Traceback (most recent call last) File <timed eval>:1, in <module> File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:67, in auto(df, val, savefig, daskreport, outputpath, file_exp) 65 #print('count:',data.count()) 66 with performance_report(filename=daskreport+"_calc_"+step.Value+".html"): ---> 67 data=eval(command) 68 #print('persist ') 69 #data=data.persist() 70 print('add optimise here once otimise can recognise') File <string>:1, in <module> File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/calc.py:222, in FWC_SSH_load(data, nc_outputpath) 220 import xarray as xr 221 filename='SEDNA_maps_ALL_SSH_anomaly' --> 222 ds=zoom.BBFG(save.load_data(plot='map',path=nc_outputpath,filename=filename)) 223 #ds=save.load_data(plot='map',path=nc_outputpath,filename=filename) 224 filename='SEDNA_maps_BBFG_FWC_2D' File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:38, in load_data(plot, path, filename) 36 data=load_twoD(path,filename,nested=False) 37 else: ---> 38 data=load_twoD(path,filename) 39 print('load computed data completed') 40 return data File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:48, in load_twoD(path, filename, nested) 46 dim=('x','y','t') if nested else ('t') 47 print ('filename=',filename,'dim',dim) ---> 48 return xr.open_mfdataset(filename,parallel=True 49 ,compat='override' 50 ,data_vars='minimal' 51 #,concat_dim=dim 52 #,combine='nested' #param_xios 53 ,coords='minimal') File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:1000, in open_mfdataset(paths, chunks, concat_dim, compat, preprocess, engine, data_vars, coords, combine, parallel, join, attrs_file, combine_attrs, **kwargs) 987 combined = _nested_combine( 988 datasets, 989 concat_dims=concat_dim, (...) 
995 combine_attrs=combine_attrs, 996 ) 997 elif combine == "by_coords": 998 # Redo ordering from coordinates, ignoring how they were ordered 999 # previously -> 1000 combined = combine_by_coords( 1001 datasets, 1002 compat=compat, 1003 data_vars=data_vars, 1004 coords=coords, 1005 join=join, 1006 combine_attrs=combine_attrs, 1007 ) 1008 else: 1009 raise ValueError( 1010 "{} is an invalid option for the keyword argument" 1011 " ``combine``".format(combine) 1012 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/combine.py:982, in combine_by_coords(data_objects, compat, data_vars, coords, fill_value, join, combine_attrs, datasets) 980 concatenated_grouped_by_data_vars = [] 981 for vars, datasets_with_same_vars in grouped_by_vars: --> 982 concatenated = _combine_single_variable_hypercube( 983 list(datasets_with_same_vars), 984 fill_value=fill_value, 985 data_vars=data_vars, 986 coords=coords, 987 compat=compat, 988 join=join, 989 combine_attrs=combine_attrs, 990 ) 991 concatenated_grouped_by_data_vars.append(concatenated) 993 return merge( 994 concatenated_grouped_by_data_vars, 995 compat=compat, (...) 998 combine_attrs=combine_attrs, 999 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/combine.py:655, in _combine_single_variable_hypercube(datasets, fill_value, data_vars, coords, compat, join, combine_attrs) 653 indexes = concatenated.indexes.get(dim) 654 if not (indexes.is_monotonic_increasing or indexes.is_monotonic_decreasing): --> 655 raise ValueError( 656 "Resulting object does not have monotonic" 657 " global indexes along dimension {}".format(dim) 658 ) 660 return concatenated ValueError: Resulting object does not have monotonic global indexes along dimension t