In [1]:
%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()

from core import load, zoom, calc, save, plots, monitor
In [2]:
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
Out[2]:
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
In [3]:
# 'month' is derived from 'JOBID' when the notebook is submitted through the
# job scheduler; it is almost the month, but not exactly.
#
# Below is the list of environment variables one can pass to the job:
#%env local='2'
# local : if 'True', run a local dask cluster; otherwise start the number of
#         workers given in 'local'.
#         If 'local' is not set, it defaults to 'True'.
#%env ychunk='2'
#%env tchunk='2'
# control chunking. 'False' keeps the chunking of the original netCDF file.
# ychunk=10 groups the original netCDF files 10 by 10 along y.
# tchunk=1 chunks the time coordinate one step at a time.
#%env control=FWC_SSH
# name of the control file used for computation/plots/save.
#%env file_exp=
# 'file_exp': name of the 'experiment'; this corresponds to the intake catalog
#             name without path and .yaml extension.
#%env year=
# for validation, this corresponds to the year in path/year/month.
# for monitoring, this corresponds to a 'date'; a value containing * means all
# matching files in the monitoring directory are processed.
# Setting it to *0[0-9], *1[0-9] and *[2-3][0-9] splits the job into three lots.
#%env month=
# for monitoring, this corresponds to the file path path-XIOS.{month}/.
#
#%env save=   proceed with saving?      True or False, default is True
#%env plot=   proceed with plotting?    True or False, default is True
#%env calc=   proceed with computation, or just load the computed result?
#             True or False, default is True
#%env save=False
#%env lazy=False
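# A minimal sketch of how these variables might be consumed (the defaults
# shown are assumptions; the real parsing lives in core/load.py):
#
#   import os
#   local  = os.environ.get('local', 'True')    # 'True' -> local dask cluster, else worker count
#   ychunk = os.environ.get('ychunk', 'False')  # 'False' -> keep original netCDF chunking
#   calcsw = os.environ.get('calc', 'True') == 'True'   # compute, or just load saved results?
#   savesw = os.environ.get('save', 'True') == 'True'   # write results to disk?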
In [4]:
%env file_exp=SEDNA_DELTA_MONITOR
%env year=2012
%env month=02
%env ychunk=10
%env save=False
%env plot=True
%env calc=True
%env lazy=
#False
%env control=M_IceClim
# M_Sectiony is OK with ychunk=False, local=True, lazy=False
env: file_exp=SEDNA_DELTA_MONITOR
env: year=2012
env: month=02
env: ychunk=10
env: save=False
env: plot=True
env: calc=True
env: lazy=
env: control=M_IceClim
In [5]:
%%time
# 'savefig': whether to save the output as HTML; keep it True.
savefig=True
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
!mkdir -p $outputpath
!mkdir -p $daskreport
client
local 20
local false ,workers 20
using host= irene4343.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= False workers 20
10000000000
False
mg1: rome dask jobqueue
rome, slurmcluster starting
This code is running on  irene4343.c-irene.mg1.tgcc.ccc.cea.fr using  SEDNA_DELTA_MONITOR file experiment, read from  ../lib/SEDNA_DELTA_MONITOR.yaml  on year= 2012  on month= 02  outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6414204irene4343.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_02M_IceClim/_lazy
CPU times: user 653 ms, sys: 169 ms, total: 822 ms
Wall time: 36.1 s
Out[5]:

Client

Client-d976af9b-1349-11ed-868e-080038b93a01

Connection method: Cluster object    Cluster type: dask_jobqueue.SLURMCluster
Dashboard: http://10.131.1.202:8787/status

SLURMCluster (160f1cd9)

Workers: 28    Total threads: 252    Total memory: 428.40 GiB
Scheduler: tcp://10.131.1.202:40973    Started: Just now

[Dask client HTML repr flattened to text: the table of 28 workers
(SLURMCluster-0-0 ... SLURMCluster-1-13, 9 threads and 15.30 GiB memory each,
spread over hosts 10.131.4.53 and 10.131.12.5) is omitted here.]
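For reference, the repr above is what dask_jobqueue produces once the SLURM jobs are running. A minimal sketch of an equivalent setup (the resource numbers are read off the repr above; the keyword values are assumptions, since the real call is inside core/load.py):

    from dask_jobqueue import SLURMCluster
    from dask.distributed import Client

    # Two SLURM jobs with 14 worker processes each, 9 threads per worker,
    # matching the 28 workers / 252 threads shown above.
    cluster = SLURMCluster(cores=126, processes=14, memory='215GiB')
    cluster.scale(jobs=2)        # launch the two SLURM jobs
    client = Client(cluster)     # connect; exposes the dashboard on :8787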

Read plotting information from a CSV file

In [6]:
df=load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
df
Out[6]:
Value    Inputs  Equation                                Zone  Plot     Colourmap  MinMax  Unit  Oldname
IceClim          calc.IceClim_load(data,nc_outputpath)   ALL   IceClim  Spectral   (0,5)   m     M-4
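A minimal sketch of what a control-file reader like load.controlfile might do (the '../lib/{control}.csv' path layout is an assumption; the real implementation lives in core/load.py):

    import pandas as pd

    def controlfile(control):
        # Read the per-control CSV and blank out missing cells, so that
        # downstream tests such as (df.Inputs != '').any() behave as expected.
        return pd.read_csv(f'../lib/{control}.csv').fillna('')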

Computation starts here

Each computation consists of the following steps (a pseudocode sketch follows the list):

  1. Load NEMO data set
  2. Zoom data set
  3. Compute (or load computed data set)
  4. Save
  5. Plot
  6. Close
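A schematic of how monitor.auto could walk these steps for each row of the control table; the eval of the Equation column inside a performance_report is visible in the traceback further down, while the plotting call and switch handling are assumptions:

    from dask.distributed import performance_report

    def auto(df, data, savefig, daskreport, outputpath, file_exp='SEDNA'):
        # 1./2. load and zoom happen once, outside this loop (see cell [7]).
        for step in df.itertuples():
            # 3. compute, or load a previously computed dataset:
            #    e.g. step.Equation == 'calc.IceClim_load(data,nc_outputpath)'
            with performance_report(filename=daskreport + "_calc_" + step.Value + ".html"):
                result = eval(step.Equation)
            # 4./5. save and plot under a name like 'SEDNA_IceClim_ALL_IceClim'
            filename = f"{file_exp}_{step.Value}_{step.Zone}_{step.Plot}"
            if savefig:
                plots.plot(result, filename, outputpath)  # assumed signature
            del result                                    # 6. free memory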
In [7]:
%%time
import os
calcswitch = os.environ.get('calc', 'True')
lazy = os.environ.get('lazy', 'True')
loaddata = (df.Inputs != '').any()
print('calcswitch=', calcswitch, 'df.Inputs != nothing', loaddata, 'lazy=', lazy)
data = load.datas(catalog_url, df.Inputs, month, year, daskreport, lazy=lazy) if (calcswitch == 'True' and loaddata) else 0
data
data
calcswitch= True df.Inputs != nothing False lazy= 
CPU times: user 284 µs, sys: 44 µs, total: 328 µs
Wall time: 327 µs
Out[7]:
0
In [8]:
%%time
monitor.auto(df, data, savefig, daskreport, outputpath, file_exp='SEDNA')
#calc= True
#save= False
#plot= True
Value='IceClim'
Zone='ALL'
Plot='IceClim'
cmap='Spectral'
clabel='m'
clim= (0, 5)
outputpath='../results/SEDNA_DELTA_MONITOR/'
nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_IceClim_ALL_IceClim'
#3 Start computing 
data= calc.IceClim_load(data,nc_outputpath)
start saving data
filename= ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_maps_ALL_IceConce/t_*/x_*/y_*.nc
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
File <timed eval>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:64, in auto(df, val, savefig, daskreport, outputpath, file_exp)
     62 #print('count:',data.count())
     63 with performance_report(filename=daskreport+"_calc_"+step.Value+".html"):
---> 64     data=eval(command)
     65     #print('persist ')
     66     #data=data.persist()
     67 #print(data.count())
     68 #print('nbytes:',data.nbytes)
     69 #print('count:',data.count())
     70 optimize_dataset(data)

File <string>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/calc.py:231, in IceClim_load(data, nc_outputpath)
    229 import xarray as xr
    230 filename='SEDNA_maps_ALL_IceConce'
--> 231 ds=save.load_data(plot='map',path=nc_outputpath,filename=filename)
    232 filename='SEDNA_maps_ALL_IceThickness'
    233 ds['sivolu']=save.load_data(plot='map',path=nc_outputpath,filename=filename).sivolu

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:38, in load_data(plot, path, filename)
     36     data=load_twoD(path,filename,nested=False)    
     37 else:
---> 38     data=load_twoD(path,filename)        
     39 print('load computed data completed')    
     40 return data

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:47, in load_twoD(path, filename, nested)
     45 filename=filesave+'/t_*/x_*/y_*.nc' if nested else filesave+'/t_*.nc'
     46 print ('filename=',filename)
---> 47 return xr.open_mfdataset(filename,parallel=True
     48               ,compat='override'
     49               ,data_vars='minimal'
     50               ,concat_dim=('x','y','t')
     51               ,combine='by_coords' #param_xios
     52               ,coords='minimal')

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:952, in open_mfdataset(paths, chunks, concat_dim, compat, preprocess, engine, data_vars, coords, combine, parallel, join, attrs_file, combine_attrs, **kwargs)
    947     ids, paths = (
    948         list(combined_ids_paths.keys()),
    949         list(combined_ids_paths.values()),
    950     )
    951 elif combine == "by_coords" and concat_dim is not None:
--> 952     raise ValueError(
    953         "When combine='by_coords', passing a value for `concat_dim` has no "
    954         "effect. To manually combine along a specific dimension you should "
    955         "instead specify combine='nested' along with a value for `concat_dim`.",
    956     )
    958 open_kwargs = dict(engine=engine, chunks=chunks or {}, **kwargs)
    960 if parallel:

ValueError: When combine='by_coords', passing a value for `concat_dim` has no effect. To manually combine along a specific dimension you should instead specify combine='nested' along with a value for `concat_dim`.
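The exception originates in the open_mfdataset call at core/save.py:47: newer xarray versions reject an explicit concat_dim when combine='by_coords'. Two possible fixes, sketched under the assumption that the t_*/x_*/y_* pieces carry proper coordinates:

    import xarray as xr

    # Fix 1: drop concat_dim and let xarray align the pieces by coordinates.
    ds = xr.open_mfdataset(filename, parallel=True,
                           compat='override', data_vars='minimal',
                           combine='by_coords', coords='minimal')

    # Fix 2: keep explicit concatenation dimensions, which requires
    # combine='nested' and a nested list of paths instead of a glob string.
    # paths = [...]  # nested list-of-lists matching ('t', 'y', 'x')
    # ds = xr.open_mfdataset(paths, parallel=True, combine='nested',
    #                        concat_dim=['t', 'y', 'x'],
    #                        compat='override', data_vars='minimal',
    #                        coords='minimal')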
In [ ]: