In [1]:
%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()

from core import  load, zoom, calc, save,plots,monitor
In [2]:
#reload funcs after updating ./core/*.py
# Reload the helper modules after editing ./core/*.py so changes take
# effect without restarting the kernel.
import importlib
for _mod in (load, zoom, calc, save, plots, monitor):
    importlib.reload(_mod)
# Display the last reloaded module (same repr the final reload() returned).
monitor
Out[2]:
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>

If you submit the job with a job scheduler, see above

Below is a list of environment variables one can pass:

%env local='2'

local: if 'True', run a dask local cluster; otherwise the value sets the number of workers. If no 'local' is given, it defaults to 'True'.

%env ychunk='2'¶

%env tchunk='2'¶

Controls chunking. 'False' applies no modification to the original netcdf file's chunks.

ychunk=10 will group the original netcdf files into chunks of 10 by 10.

tchunk=1 will chunk the time coordinate one by one.

%env file_exp=¶

'file_exp': which 'experiment' name is it?

This corresponds to the intake catalog name, without the path and the '.yaml' extension.

%env year=¶

For validation, this corresponds to the 'year' in path/year/month.

For monitoring, this corresponds to 'date'; using * means process all files in the monitoring directory.

Setting it to 0[0-9] & 1[0-9] & *[2-3][0-9], the job can be separated into three lots.

%env month=¶

For monitoring, this corresponds to the file path path-XIOS.{month}/.

#

%env control=FWC_SSH¶

Name of the control file to be used for computation/plots/save.

AWTD.sh M_AWTMD

Ice_quant_flux.sh M_Fluxnet M_Ice_quantities

FWC_SSH.sh M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly

IceClim.sh M_IceClim M_IceConce M_IceThick

M_Mean_temp_velo M_MLD_2D M_Mooring M_Sectionx M_Sectiony

%env save= proceed with saving? True or False. Default is set to True.

%env plot= proceed with plotting? True or False. Default is set to True.

%env calc= proceed with computation, or just load the computed result? True or False. Default is set to True.

%env save=False¶

%env lazy=False¶

For debugging, this cell can help:

%env file_exp=SEDNA_DELTA_MONITOR %env year=2012 %env month=01

0[1-2]¶

%env ychunk=10 %env ychunk=False %env save=False %env plot=True %env calc=True # %env lazy=False

False¶

%env control=M_Fluxnet

M_Sectiony ok with ychunk=False local=True lazy=False¶

In [3]:
%%time
# 'savefig': Do we save output in html? or not. keep it true. 
savefig=True
# Read scheduler/environment variables, start the dask cluster (local or
# distributed depending on 'local'), and return the run configuration.
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
# Make sure output directories exist (results and dask performance reports).
!mkdir -p $outputpath
!mkdir -p $daskreport
# Display the dask client (includes the dashboard link).
client
local True
using host= irene5816.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16
10000000000
False
rome local cluster starting
This code is running on  irene5816.c-irene.mg1.tgcc.ccc.cea.fr using  SEDNA_DELTA_MONITOR file experiment, read from  ../lib/SEDNA_DELTA_MONITOR.yaml  on year= 2012  on month= 02  outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6417347irene5816.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_02M_AWTMD/
CPU times: user 557 ms, sys: 136 ms, total: 693 ms
Wall time: 19.2 s
Out[3]:

Client

Client-493deb04-13c8-11ed-b9c7-080038b94289

Connection method: Cluster object Cluster type: distributed.LocalCluster
Dashboard: http://127.0.0.1:8787/status

Cluster Info

LocalCluster

cd64abea

Dashboard: http://127.0.0.1:8787/status Workers: 16
Total threads: 128 Total memory: 251.06 GiB
Status: running Using processes: True

Scheduler Info

Scheduler

Scheduler-e7709952-6453-4086-955c-1920ec03d758

Comm: tcp://127.0.0.1:39997 Workers: 16
Dashboard: http://127.0.0.1:8787/status Total threads: 128
Started: Just now Total memory: 251.06 GiB

Workers

Worker: 0

Comm: tcp://127.0.0.1:32963 Total threads: 8
Dashboard: http://127.0.0.1:34357/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:34869
Local directory: /tmp/dask-worker-space/worker-asivj9j1

Worker: 1

Comm: tcp://127.0.0.1:46072 Total threads: 8
Dashboard: http://127.0.0.1:41270/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:39976
Local directory: /tmp/dask-worker-space/worker-cndeonkj

Worker: 2

Comm: tcp://127.0.0.1:35754 Total threads: 8
Dashboard: http://127.0.0.1:40221/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:38567
Local directory: /tmp/dask-worker-space/worker-jifo5ci8

Worker: 3

Comm: tcp://127.0.0.1:41543 Total threads: 8
Dashboard: http://127.0.0.1:46534/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:42432
Local directory: /tmp/dask-worker-space/worker-n78b0hex

Worker: 4

Comm: tcp://127.0.0.1:41159 Total threads: 8
Dashboard: http://127.0.0.1:45208/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:41513
Local directory: /tmp/dask-worker-space/worker-2kjrjg5z

Worker: 5

Comm: tcp://127.0.0.1:38406 Total threads: 8
Dashboard: http://127.0.0.1:44510/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:43465
Local directory: /tmp/dask-worker-space/worker-km3n44gt

Worker: 6

Comm: tcp://127.0.0.1:40331 Total threads: 8
Dashboard: http://127.0.0.1:37334/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:41359
Local directory: /tmp/dask-worker-space/worker-tiysrzec

Worker: 7

Comm: tcp://127.0.0.1:34211 Total threads: 8
Dashboard: http://127.0.0.1:44079/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:46312
Local directory: /tmp/dask-worker-space/worker-ddf4859j

Worker: 8

Comm: tcp://127.0.0.1:41366 Total threads: 8
Dashboard: http://127.0.0.1:41398/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:34521
Local directory: /tmp/dask-worker-space/worker-z_r7nz66

Worker: 9

Comm: tcp://127.0.0.1:34776 Total threads: 8
Dashboard: http://127.0.0.1:41738/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:33007
Local directory: /tmp/dask-worker-space/worker-lmmf_r84

Worker: 10

Comm: tcp://127.0.0.1:39771 Total threads: 8
Dashboard: http://127.0.0.1:43082/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:36977
Local directory: /tmp/dask-worker-space/worker-f0vcubd4

Worker: 11

Comm: tcp://127.0.0.1:41397 Total threads: 8
Dashboard: http://127.0.0.1:41525/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:39616
Local directory: /tmp/dask-worker-space/worker-qy479e1k

Worker: 12

Comm: tcp://127.0.0.1:44630 Total threads: 8
Dashboard: http://127.0.0.1:44456/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:34830
Local directory: /tmp/dask-worker-space/worker-2upe7pd7

Worker: 13

Comm: tcp://127.0.0.1:43950 Total threads: 8
Dashboard: http://127.0.0.1:46731/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:33905
Local directory: /tmp/dask-worker-space/worker-4jb3l2c6

Worker: 14

Comm: tcp://127.0.0.1:35116 Total threads: 8
Dashboard: http://127.0.0.1:35957/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:43996
Local directory: /tmp/dask-worker-space/worker-5ekk_hp_

Worker: 15

Comm: tcp://127.0.0.1:36354 Total threads: 8
Dashboard: http://127.0.0.1:46357/status Memory: 15.69 GiB
Nanny: tcp://127.0.0.1:42823
Local directory: /tmp/dask-worker-space/worker-qleufnvo

Read plotting information from a CSV file

In [4]:
# Load the table of computations/plots to run from the control CSV file.
df=load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
df
Out[4]:
Value Inputs Equation Zone Plot Colourmap MinMax Unit Oldname Unnamed: 10
AW_maxtemp_depth gridT.votemper,gridS.vosaline,param.mask,param... calc.AWTD4(data) ALL AWTD_map jet (0,800) m M-5

Computation starts here¶

Each computation consists of

  1. Load NEMO data set
  2. Zoom data set
  3. Compute (or load computed data set)
  4. Save
  5. Plot
  6. Close
In [5]:
%%time
import os
calcswitch=os.environ.get('calc', 'True') 
lazy=os.environ.get('lazy','False' )
loaddata=((df.Inputs != '').any()) 
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata, 'lazy=',lazy)
data = load.datas(catalog_url,df.Inputs,month,year,daskreport,lazy=lazy) if ((calcswitch=='True' )*loaddata) else 0 
data
calcswitch= False df.Inputs != nothing True lazy= False
CPU times: user 374 µs, sys: 0 ns, total: 374 µs
Wall time: 360 µs
Out[5]:
0
In [6]:
%%time
# Run every computation/plot listed in df (load -> zoom -> compute -> save
# -> plot per row); figures and netcdf results go under outputpath.
monitor.auto(df,data,savefig,daskreport,outputpath,file_exp='SEDNA'
            )
#calc= False
#save= False
#plot= True
Value='AW_maxtemp_depth'
Zone='ALL'
Plot='AWTD_map'
cmap='jet'
clabel='m'
clim= (0, 800)
outputpath='../results/SEDNA_DELTA_MONITOR/'
nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_AWTD_map_ALL_AW_maxtemp_depth'
#3 no computing , loading starts
data=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)
start saving data
filename= ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_AWTD_map_ALL_AW_maxtemp_depth/t_*/y_*/x_*.nc
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
File <timed eval>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:79, in auto(df, val, savefig, daskreport, outputpath, file_exp)
     77     print('data=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)' )
     78     with performance_report(filename=daskreport+"_calc_"+step.Value+".html"):
---> 79         data=save.load_data(plot=Plot,path=nc_outputpath,filename=filename)
     80     #saveswitch=False
     82 display(data)

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:38, in load_data(plot, path, filename)
     36     data=load_twoD(path,filename,nested=False)    
     37 else:
---> 38     data=load_twoD(path,filename)        
     39 print('load computed data completed')    
     40 return data

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:48, in load_twoD(path, filename, nested)
     46 dim=('x','y','t') if nested else ('t')
     47 print ('filename=',filename)
---> 48 return xr.open_mfdataset(filename,parallel=True
     49               ,compat='override'
     50               ,data_vars='minimal'
     51               ,concat_dim=dim
     52               ,combine='nested' #param_xios
     53               ,coords='minimal')

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:987, in open_mfdataset(paths, chunks, concat_dim, compat, preprocess, engine, data_vars, coords, combine, parallel, join, attrs_file, combine_attrs, **kwargs)
    983 try:
    984     if combine == "nested":
    985         # Combined nested list by successive concat and merge operations
    986         # along each dimension, using structure given by "ids"
--> 987         combined = _nested_combine(
    988             datasets,
    989             concat_dims=concat_dim,
    990             compat=compat,
    991             data_vars=data_vars,
    992             coords=coords,
    993             ids=ids,
    994             join=join,
    995             combine_attrs=combine_attrs,
    996         )
    997     elif combine == "by_coords":
    998         # Redo ordering from coordinates, ignoring how they were ordered
    999         # previously
   1000         combined = combine_by_coords(
   1001             datasets,
   1002             compat=compat,
   (...)
   1006             combine_attrs=combine_attrs,
   1007         )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/combine.py:365, in _nested_combine(datasets, concat_dims, compat, data_vars, coords, ids, fill_value, join, combine_attrs)
    362 _check_shape_tile_ids(combined_ids)
    364 # Apply series of concatenate or merge operations along each dimension
--> 365 combined = _combine_nd(
    366     combined_ids,
    367     concat_dims,
    368     compat=compat,
    369     data_vars=data_vars,
    370     coords=coords,
    371     fill_value=fill_value,
    372     join=join,
    373     combine_attrs=combine_attrs,
    374 )
    375 return combined

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/combine.py:228, in _combine_nd(combined_ids, concat_dims, data_vars, coords, compat, fill_value, join, combine_attrs)
    226 n_dims = len(example_tile_id)
    227 if len(concat_dims) != n_dims:
--> 228     raise ValueError(
    229         "concat_dims has length {} but the datasets "
    230         "passed are nested in a {}-dimensional structure".format(
    231             len(concat_dims), n_dims
    232         )
    233     )
    235 # Each iteration of this loop reduces the length of the tile_ids tuples
    236 # by one. It always combines along the first dimension, removing the first
    237 # element of the tuple
    238 for concat_dim in concat_dims:

ValueError: concat_dims has length 3 but the datasets passed are nested in a 1-dimensional structure