%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()
from core import load, zoom, calc, save, plots, monitor
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
If you submit the job with a job scheduler, the environment variables listed below can be passed.
local : if 'True', run a Dask local cluster; otherwise set it to the number of workers to use. If 'local' is not given, it defaults to 'True'.
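Roughly, and only as a sketch (the real logic lives in core/load.py; the handling of the 'local' value here is an assumption), this switch could drive the cluster setup like so:
# Sketch only -- not core/load.py. Assumes 'local' is either 'True' or a worker count.
import os
from dask.distributed import Client, LocalCluster
local = os.environ.get('local', 'True')
if local == 'True':
    cluster = LocalCluster()                      # let Dask choose the number of workers
else:
    cluster = LocalCluster(n_workers=int(local))  # 'local' holds an explicit worker count
client = Client(cluster)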
%env ychunk='2'
#%env tchunk='2'
ychunk, tchunk: control chunking. 'False' keeps the chunking of the original NetCDF file unchanged.
ychunk=10 groups the original NetCDF chunks along y ten by ten.
tchunk=1 chunks the time coordinate one step at a time.
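As an illustration only (the file name and dimension names below are placeholders; the real chunking is applied inside core/load.py), such settings translate into dask chunks when opening the data:
# Illustrative sketch: open a file with dask chunks along assumed dimension names.
import xarray as xr
ds = xr.open_dataset("example.nc", chunks={"y": 10, "time_counter": 1})  # names are assumptions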
%env file_exp=
'file_exp': which 'experiment' is this? It corresponds to the intake catalog name, without the path and the .yaml extension.
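For reference, a hedged sketch of opening such a catalog (the ../lib/ location matches the log output further down; everything else here is an assumption):
# Sketch: open the intake catalog named by 'file_exp' (path taken from the run log below).
import os
import intake
file_exp = os.environ.get('file_exp', 'SEDNA_DELTA_MONITOR')
cat = intake.open_catalog(f"../lib/{file_exp}.yaml")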
#%env year=
For validation, this corresponds to the year in path/year/month. For monitoring, it corresponds to a 'date' pattern: setting it to 0[0-9], 1[0-9] and [2-3][0-9] covers all files in the monitoring directory, so the job can be split into three lots. For the DELTA experiment, 'year' really is the year.
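A loose illustration of the three-lot split (the real file layout handled by core/load.py is an assumption; only the date patterns come from the description above):
# Illustration: how the three patterns partition two-digit date strings into lots.
import fnmatch
dates = [f"{d:02d}" for d in range(1, 32)]
for pat in ("0[0-9]", "1[0-9]", "[2-3][0-9]"):
    print(pat, fnmatch.filter(dates, pat))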
%env month=
For monitoring, this corresponds to the file path path-XIOS.{month}/.
For the DELTA experiment, 'month' really is the month.
save: proceed with saving? True or False. Default is True.
plot: proceed with plotting? True or False. Default is True.
calc: proceed with the computation, or just load the computed result? True or False. Default is True.
The saved output files are the ones used for plotting.
lazy: use kerchunked files -> 'False'; do not use kerchunk -> 'True'.
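These switches arrive from the environment as strings, in the same style as the computation cell further down ('calc' and 'lazy' are confirmed there; 'save' and 'plot' as env-var names are assumptions):
# Sketch of how the switches are presumably read ('calc' and 'lazy' are confirmed
# by the cell below; 'save' and 'plot' as env-var names are assumptions).
import os
calcswitch = os.environ.get('calc', 'True')   # compute, or just load computed results
saveswitch = os.environ.get('save', 'True')   # write the plotted data to disk
plotswitch = os.environ.get('plot', 'True')   # produce the plots/html output
lazy       = os.environ.get('lazy', 'False')  # 'False' -> use kerchunked files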
control: name of the control file used for computation/plots/saving. There are a number of M_xxx.csv control files:
Monitor.sh calls M_MLD_2D,
and AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh, Integrals.sh and Sections.sh call:
M_AWTMD
M_Fluxnet
M_Ice_quantities
M_IceClim M_IceConce M_IceThick
M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
M_Mean_temp_velo M_Mooring
M_Sectionx M_Sectiony
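In this run the control file is M_AWTMD (it appears in the daskreport path in the log further down). A minimal, assumed way of selecting it via the environment:
# Assumed usage: the name 'control' mirrors the variable returned by
# load.set_control; the real selection mechanism may differ.
import os
os.environ.setdefault('control', 'M_AWTMD')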
%%time
# 'savefig': do we save the output as html or not? Keep it True.
savefig=True
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
!mkdir -p $outputpath
!mkdir -p $daskreport
client
local True
using host= irene5512.c-irene.mg1.tgcc.ccc.cea.fr
starting dask cluster on local= True workers 16 10000000000 rome
local cluster starting
This code is running on irene5512.c-irene.mg1.tgcc.ccc.cea.fr using SEDNA_DELTA_MONITOR file experiment, read from ../lib/SEDNA_DELTA_MONITOR.yaml
on year= 2012
on month= 04
outputpath= ../results/SEDNA_DELTA_MONITOR/
daskreport= ../results/dask/6419597irene5512.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_04M_AWTMD/
CPU times: user 565 ms, sys: 135 ms, total: 699 ms
Wall time: 21.4 s
Client-dea02a6d-13e7-11ed-97cf-080038b9409b
Connection method: Cluster object | Cluster type: distributed.LocalCluster
Dashboard: http://127.0.0.1:8787/status
LocalCluster 2f14de10: Status: running | Using processes: True | Workers: 16 | Total threads: 128 | Total memory: 251.06 GiB
Scheduler-8f72feac-48be-4770-a3b6-fd46386178f5 | Comm: tcp://127.0.0.1:43941 | Started: Just now
(16 workers, each with 8 threads and 15.69 GiB memory; per-worker Comm/Nanny addresses and local directories omitted)
df=load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
df
| Value | Inputs | Equation | Zone | Plot | Colourmap | MinMax | Unit | Oldname |
|---|---|---|---|---|---|---|---|---|
| AW_maxtemp_depth | gridT.votemper, gridS.vosaline, param.mask, param... | calc.AWTD4(data) | ALL | AWTD_map | jet | (0,800) | m | M-5 |
Each computation consists of loading the input data, computing, plotting and saving, controlled by the switches above.
%%time
import os
# read the run-time switches from the environment (they arrive as strings)
calcswitch = os.environ.get('calc', 'True')
lazy = os.environ.get('lazy', 'False')
# only load input data if at least one row of the control file declares Inputs
loaddata = (df.Inputs != '').any()
print('calcswitch=', calcswitch, 'df.Inputs != nothing', loaddata, 'lazy=', lazy)
data = load.datas(catalog_url, df.Inputs, month, year, daskreport, lazy=lazy) if (calcswitch == 'True' and loaddata) else 0
data
calcswitch= False df.Inputs != nothing True lazy= False
CPU times: user 377 µs, sys: 58 µs, total: 435 µs
Wall time: 436 µs
0
%%time
monitor.auto(df, data, savefig, daskreport, outputpath, file_exp='SEDNA')
#calc= False #save= False #plot= True
Value='AW_maxtemp_depth' Zone='ALL' Plot='AWTD_map' cmap='jet' clabel='m' clim= (0, 800)
outputpath='../results/SEDNA_DELTA_MONITOR/' nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_AWTD_map_ALL_AW_maxtemp_depth'
#3 no computing , loading starts
data=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)
start saving data
filename= ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_AWTD_map_ALL_AW_maxtemp_depth/t_*/y_*/x_*.nc
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
File <timed eval>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:79, in auto(df, val, savefig, daskreport, outputpath, file_exp)
     77 print('data=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)' )
     78 with performance_report(filename=daskreport+"_calc_"+step.Value+".html"):
---> 79     data=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)
     80     #saveswitch=False
     82 display(data)

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:38, in load_data(plot, path, filename)
     36     data=load_twoD(path,filename,nested=False)
     37 else:
---> 38     data=load_twoD(path,filename)
     39 print('load computed data completed')
     40 return data

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:48, in load_twoD(path, filename, nested)
     46 dim=('x','y','t') if nested else ('t')
     47 print ('filename=',filename)
---> 48 return xr.open_mfdataset(filename,parallel=True
     49               ,compat='override'
     50               ,data_vars='minimal'
     51               ,concat_dim=dim
     52               ,combine='nested' #param_xios
     53               ,coords='minimal')

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:987, in open_mfdataset(paths, chunks, concat_dim, compat, preprocess, engine, data_vars, coords, combine, parallel, join, attrs_file, combine_attrs, **kwargs)
    983 try:
    984     if combine == "nested":
    985         # Combined nested list by successive concat and merge operations
    986         # along each dimension, using structure given by "ids"
--> 987         combined = _nested_combine(
    988             datasets,
    989             concat_dims=concat_dim,
    990             compat=compat,
    991             data_vars=data_vars,
    992             coords=coords,
    993             ids=ids,
    994             join=join,
    995             combine_attrs=combine_attrs,
    996         )
    997     elif combine == "by_coords":
    998         # Redo ordering from coordinates, ignoring how they were ordered
    999         # previously
   1000         combined = combine_by_coords(
   1001             datasets,
   1002             compat=compat,
   (...)
   1006             combine_attrs=combine_attrs,
   1007         )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/combine.py:365, in _nested_combine(datasets, concat_dims, compat, data_vars, coords, ids, fill_value, join, combine_attrs)
    362 _check_shape_tile_ids(combined_ids)
    364 # Apply series of concatenate or merge operations along each dimension
--> 365 combined = _combine_nd(
    366     combined_ids,
    367     concat_dims,
    368     compat=compat,
    369     data_vars=data_vars,
    370     coords=coords,
    371     fill_value=fill_value,
    372     join=join,
    373     combine_attrs=combine_attrs,
    374 )
    375 return combined

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/combine.py:228, in _combine_nd(combined_ids, concat_dims, data_vars, coords, compat, fill_value, join, combine_attrs)
    226 n_dims = len(example_tile_id)
    227 if len(concat_dims) != n_dims:
--> 228     raise ValueError(
    229         "concat_dims has length {} but the datasets "
    230         "passed are nested in a {}-dimensional structure".format(
    231             len(concat_dims), n_dims
    232         )
    233     )
    235 # Each iteration of this loop reduces the length of the tile_ids tuples
    236 # by one. It always combines along the first dimension, removing the first
    237 # element of the tuple
    238 for concat_dim in concat_dims:

ValueError: concat_dims has length 3 but the datasets passed are nested in a 1-dimensional structure
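The error comes from handing a flat (1-D) list of files to open_mfdataset while asking for three concat dimensions. A self-contained toy sketch of the same xarray rule (not the project's save.py):
# Toy illustration of the ValueError above: with combine='nested', the nesting
# depth of the dataset list must match the number of concat dimensions.
import numpy as np
import xarray as xr

def make(v):
    return xr.Dataset({"vo": ("t", np.array([v], dtype=float))})

flat = [make(0.0), make(1.0)]              # a flat, 1-D list of datasets
xr.combine_nested(flat, concat_dim="t")    # one concat dim for one nesting level: fine
try:
    xr.combine_nested(flat, concat_dim=["t", "y", "x"])  # 3 dims vs 1-D nesting
except ValueError as err:
    print(err)                             # reproduces the message seen above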