In [1]:
%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()

from core import load, zoom, calc, save, plots, monitor
In [2]:
# reload functions after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
Out[2]:
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
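An alternative to explicit importlib.reload calls is IPython's autoreload extension (standard IPython, not specific to this repo):

%load_ext autoreload
%autoreload 2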
In [3]:
# 'month' ≈ 'JOBID': almost a month, but not exactly.
# The above applies if you submit the job with the job scheduler.
#
# Below is the list of environment variables one can pass:
#
#%env local='2'
# local : if True, run a local dask cluster; otherwise start the number of
#         workers given in 'local'.
#         If 'local' is not set, it defaults to True.
#%env ychunk='2'
#%env tchunk='2'
# Control chunking. 'False' keeps the chunking of the original netcdf files.
# ychunk=10 groups the original netcdf files 10 by 10 along y.
# tchunk=1 chunks the time coordinate one step at a time.
#%env control=FWC_SSH
# Name of the control file used for computation/plots/save.
#%env file_exp=
# 'file_exp': name of the 'experiment'.
#     This corresponds to the intake catalog name, without path and .yaml.
#%env year=
# For validation, this corresponds to the year in path/year/month.
# For monitoring, this corresponds to 'date'; a value containing * processes
# all files in the monitoring directory.
# Setting it to *0[0-9], *1[0-9], and *[2-3][0-9] splits the job into three lots.
#%env month=
# For monitoring, this corresponds to the file path path-XIOS.{month}/.
#
#%env save=   proceed with saving?     True or False, default True
#%env plot=   proceed with plotting?   True or False, default True
#%env calc=   proceed with computation, or just load the computed result?
#             True or False, default True
#%env save=False
#%env lazy=False
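# As a hedged sketch of how these string-valued switches might be consumed
# (illustration only, not the actual logic in core/load.py; the
# 'time_counter' dimension name is an assumption from NEMO conventions):
import os
ychunk = os.environ.get('ychunk', 'False')   # 'False' keeps native chunking
tchunk = os.environ.get('tchunk', 'False')
chunks = {}
if ychunk != 'False':
    chunks['y'] = int(ychunk)
if tchunk != 'False':
    chunks['time_counter'] = int(tchunk)     # assumed NEMO time dimension name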
In [4]:
%%time
# 'savefig': save plot output as html? Keep it True.
savefig = True
client, cluster, control, catalog_url, month, year, daskreport, outputpath = load.set_control(host)
!mkdir -p $outputpath
!mkdir -p $daskreport
client
local True
using host= irene4142.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16
10000000000
False
rome local cluster starting
This code is running on  irene4142.c-irene.mg1.tgcc.ccc.cea.fr using  SEDNA_DELTA_MONITOR file experiment, read from  ../lib/SEDNA_DELTA_MONITOR.yaml  on year= 2012  on month= 02  outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6414597irene4142.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_02M_AWTMD/
CPU times: user 531 ms, sys: 117 ms, total: 648 ms
Wall time: 20.9 s
Out[4]:

Client: Client-3a8d5a2e-1358-11ed-915f-080038b9322d
  Connection method: Cluster object
  Cluster type: distributed.LocalCluster
  Dashboard: http://127.0.0.1:8787/status

Cluster Info: LocalCluster (b55104e4)
  Dashboard: http://127.0.0.1:8787/status
  Workers: 16
  Total threads: 128
  Total memory: 251.06 GiB
  Status: running
  Using processes: True

Scheduler Info: Scheduler-c971a72b-8431-452b-90e0-01317736ca3d
  Comm: tcp://127.0.0.1:41769
  Workers: 16
  Dashboard: http://127.0.0.1:8787/status
  Total threads: 128
  Started: Just now
  Total memory: 251.06 GiB

Workers: 16 (Worker 0 through Worker 15), each with 8 threads and 15.69 GiB memory; comm, dashboard, and nanny endpoints on tcp://127.0.0.1; local directories under /tmp/dask-worker-space/.
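For reference, a minimal sketch of how a local cluster of this shape could be started by hand; load.set_control wraps something similar, and the exact arguments below are assumptions read off the summary above:

from dask.distributed import Client, LocalCluster

# 16 workers x 8 threads, ~15.69 GiB per worker, as in the repr above.
cluster = LocalCluster(n_workers=16, threads_per_worker=8,
                       memory_limit='15.69GiB', processes=True)
client = Client(cluster)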

Read plotting information from a CSV file

In [5]:
df = load.controlfile(control)
# Take out computations tagged 'later'
#df = df[~df['Value'].str.contains('later')]
df
Out[5]:
Value: AW_maxtemp_depth
Inputs: gridT.votemper, gridS.vosaline, param.mask, param...
Equation: calc.AWTD4(data)
Zone: ALL
Plot: AWTD_map
Colourmap: jet
MinMax: (0,800)
Unit: m
Oldname: M-5
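For reference, a minimal sketch of reading such a control file directly with pandas; load.controlfile presumably does something similar, and the path below is hypothetical:

import pandas as pd

control_csv = '../control/SEDNA_DELTA_MONITOR.csv'   # hypothetical path
df = pd.read_csv(control_csv)
# Drop rows tagged 'later', as in the commented-out filter above.
df = df[~df['Value'].str.contains('later')]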

Computation starts here

Each computation consists of the following steps (sketched in code after the list):

  1. Load NEMO data set
  2. Zoom data set
  3. Compute (or load computed data set)
  4. Save
  5. Plot
  6. Close
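A minimal sketch of one such iteration, using the modules imported in In [1]; monitor.auto (called below) drives the real loop, and every signature here except calc.AWTD4 is an assumption:

def one_step(step, data):
    # 1.-2. load and zoom are handled upstream by load.datas for the whole frame.
    # 3. compute: the Equation column names the function, e.g. calc.AWTD4(data)
    result = calc.AWTD4(data)
    # 4.-5. save and plot: hypothetical signatures, for illustration only
    save.datas(result)
    plots.plot(result)
    # 6. close: release the dask-backed result
    result.close()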
In [6]:
%%time
import os
calcswitch = os.environ.get('calc', 'True')
lazy = os.environ.get('lazy', 'False')
loaddata = (df.Inputs != '').any()
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata, 'lazy=',lazy)
data = load.datas(catalog_url, df.Inputs, month, year, daskreport, lazy=lazy) if (calcswitch == 'True' and loaddata) else 0
data
calcswitch= False df.Inputs != nothing True lazy= False
CPU times: user 299 µs, sys: 48 µs, total: 347 µs
Wall time: 347 µs
Out[6]:
0
In [7]:
%%time
monitor.auto(df, data, savefig, daskreport, outputpath, file_exp='SEDNA')
#calc= False
#save= False
#plot= True
Value='AW_maxtemp_depth'
Zone='ALL'
Plot='AWTD_map'
cmap='jet'
clabel='m'
clim= (0, 800)
outputpath='../results/SEDNA_DELTA_MONITOR/'
nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_AWTD_map_ALL_AW_maxtemp_depth'
#3 no computing , loading starts
dtaa=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)
start saving data
filename= ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_AWTD_map_ALL_AW_maxtemp_depth/t_*/y_*/x_*.nc
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
File <timed eval>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:76, in auto(df, val, savefig, daskreport, outputpath, file_exp)
     74     print('dtaa=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)' )
     75     with performance_report(filename=daskreport+"_calc_"+step.Value+".html"):
---> 76         data=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)
     77     #saveswitch=False
     79 display(data)

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:38, in load_data(plot, path, filename)
     36     data=load_twoD(path,filename,nested=False)    
     37 else:
---> 38     data=load_twoD(path,filename)        
     39 print('load computed data completed')    
     40 return data

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:47, in load_twoD(path, filename, nested)
     45 filename=filesave+'/t_*/y_*/x_*.nc' if nested else filesave+'/t_*.nc'
     46 print ('filename=',filename)
---> 47 return xr.open_mfdataset(filename,parallel=True
     48               ,compat='override'
     49               ,data_vars='minimal'
     50               ,concat_dim=('x','y','t')
     51               ,combine='nested' #param_xios
     52               ,coords='minimal')

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:987, in open_mfdataset(paths, chunks, concat_dim, compat, preprocess, engine, data_vars, coords, combine, parallel, join, attrs_file, combine_attrs, **kwargs)
    983 try:
    984     if combine == "nested":
    985         # Combined nested list by successive concat and merge operations
    986         # along each dimension, using structure given by "ids"
--> 987         combined = _nested_combine(
    988             datasets,
    989             concat_dims=concat_dim,
    990             compat=compat,
    991             data_vars=data_vars,
    992             coords=coords,
    993             ids=ids,
    994             join=join,
    995             combine_attrs=combine_attrs,
    996         )
    997     elif combine == "by_coords":
    998         # Redo ordering from coordinates, ignoring how they were ordered
    999         # previously
   1000         combined = combine_by_coords(
   1001             datasets,
   1002             compat=compat,
   (...)
   1006             combine_attrs=combine_attrs,
   1007         )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/combine.py:365, in _nested_combine(datasets, concat_dims, compat, data_vars, coords, ids, fill_value, join, combine_attrs)
    362 _check_shape_tile_ids(combined_ids)
    364 # Apply series of concatenate or merge operations along each dimension
--> 365 combined = _combine_nd(
    366     combined_ids,
    367     concat_dims,
    368     compat=compat,
    369     data_vars=data_vars,
    370     coords=coords,
    371     fill_value=fill_value,
    372     join=join,
    373     combine_attrs=combine_attrs,
    374 )
    375 return combined

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/combine.py:228, in _combine_nd(combined_ids, concat_dims, data_vars, coords, compat, fill_value, join, combine_attrs)
    226 n_dims = len(example_tile_id)
    227 if len(concat_dims) != n_dims:
--> 228     raise ValueError(
    229         "concat_dims has length {} but the datasets "
    230         "passed are nested in a {}-dimensional structure".format(
    231             len(concat_dims), n_dims
    232         )
    233     )
    235 # Each iteration of this loop reduces the length of the tile_ids tuples
    236 # by one. It always combines along the first dimension, removing the first
    237 # element of the tuple
    238 for concat_dim in concat_dims:

ValueError: concat_dims has length 3 but the datasets passed are nested in a 1-dimensional structure
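The error is consistent with the glob t_*/y_*/x_*.nc expanding to a flat, 1-dimensional list of paths while three concat dimensions were requested. A hedged sketch of two possible repairs in save.load_twoD (untested against this repo):

import xarray as xr

# Option 1: if the files really only vary along one dimension, pass a
# single concat_dim so it matches the flat list xarray received.
data = xr.open_mfdataset(filename, parallel=True,
                         compat='override', data_vars='minimal',
                         concat_dim='t', combine='nested', coords='minimal')

# Option 2: keep concat_dim=('x','y','t') but build a 3-level nested
# list of paths [[[...]]] mirroring the t_*/y_*/x_*.nc directory layout,
# since combine='nested' infers dimensionality from the list nesting.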