In [1]:
%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()

from core import load, zoom, calc, save, plots, monitor
In [2]:
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
Out[2]:
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>

If you submit the job with a job scheduler

Below is a list of environment variables one can pass.

%env local='2'

local: if 'True', run a local dask cluster; otherwise the value is used as the number of workers. If 'local' is not given, it defaults to 'True'.
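
A minimal sketch of how this switch could be interpreted (illustrative only; the real logic lives in load.set_control):

import os
from dask.distributed import Client, LocalCluster

local = os.environ.get('local', 'True')           # unset behaves like 'True'
if local == 'True':
    cluster = LocalCluster()                      # let dask choose the worker count
else:
    cluster = LocalCluster(n_workers=int(local))  # 'local' carries a worker count, e.g. 2
client = Client(cluster)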

%env ychunk='2'

%env tchunk='2'

These control chunking. 'False' applies no modification to the original netcdf files' chunks.

ychunk=10 groups the original netcdf files ten by ten along y.

tchunk=1 chunks the time coordinate one step at a time.
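
Schematically, the chunk control could be applied like this (a sketch assuming ds is the concatenated dataset and native_ychunk is the y extent of one source file; the actual code is in core/load.py):

import os

ychunk = os.environ.get('ychunk', 'False')
tchunk = os.environ.get('tchunk', 'False')
if ychunk != 'False':
    # e.g. ychunk=10: merge ten source files into one dask chunk along y
    ds = ds.chunk({'y': int(ychunk) * native_ychunk})
if tchunk != 'False':
    # e.g. tchunk=1: one time step per chunk
    ds = ds.chunk({'t': int(tchunk)})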

%env file_exp=

file_exp: which 'experiment' is it?

This corresponds to the intake catalog name, without the path and the .yaml extension.

%env year=

For validation, this corresponds to the year in path/year/month.

For monitoring, this corresponds to 'date'; a value containing * means process all files in the monitoring directory.

Setting it to 0[0-9], 1[0-9] and *[2-3][0-9] splits the job into three lots, as sketched below.
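
For example, three batch jobs each given one of those patterns would pick up disjoint subsets of the files (hypothetical sketch; monitoring_dir is an assumed name):

import glob
import os

pattern = os.environ.get('year', '*')  # e.g. '0[0-9]', '1[0-9]' or '*[2-3][0-9]'
files = sorted(glob.glob(os.path.join(monitoring_dir, pattern)))
print(len(files), 'dates matched by', pattern)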

%env month=

For monitoring, this corresponds to the file path path-XIOS.{month}/.


%env control=FWC_SSH

Name of the control file used for computation/plots/saving, and how it is called from Monitor.sh.

Monitor.sh calls M_MLD_2D, and the scripts AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh and Integrals.sh call the following controls (the same mapping appears as a Python dict in the sketch after this list):

  • AWTD.sh: M_AWTMD
  • Fluxnet.sh: M_Fluxnet
  • Siconc.sh: M_Ice_quantities
  • IceClim.sh: M_IceClim M_IceConce M_IceThick
  • FWC_SSH.sh: M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
  • Integrals.sh: M_Mean_temp_velo M_Mooring M_Sectionx M_Sectiony
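
For reference, the same mapping written as a plain Python dict (purely illustrative; this table is maintained in the shell scripts themselves):

CONTROLS = {
    'AWTD.sh':      ['M_AWTMD'],
    'Fluxnet.sh':   ['M_Fluxnet'],
    'Siconc.sh':    ['M_Ice_quantities'],
    'IceClim.sh':   ['M_IceClim', 'M_IceConce', 'M_IceThick'],
    'FWC_SSH.sh':   ['M_FWC_2D', 'M_FWC_integrals', 'M_FWC_SSH', 'M_SSH_anomaly'],
    'Integrals.sh': ['M_Mean_temp_velo', 'M_Mooring', 'M_Sectionx', 'M_Sectiony'],
}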

%env save= : proceed with saving? True or False. Default is True.

%env plot= : proceed with plotting? True or False. Default is True.

%env calc= : proceed with computation, or just load a computed result? True or False. Default is True.

%env save=False

%env lazy=False
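
Note that these switches arrive as strings, not booleans, so the code compares them to the literal 'True' (the same pattern appears in cell In [5] below and in core/monitor.py):

import os

saveswitch = os.environ.get('save', 'True') == 'True'
plotswitch = os.environ.get('plot', 'True') == 'True'
calcswitch = os.environ.get('calc', 'True') == 'True'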

For debugging, this cell can help:

%env file_exp=SEDNA_DELTA_MONITOR
%env year=2012
%env month=01      (or 0[1-2])
%env ychunk=10     (or False)
%env save=False
%env plot=True
%env calc=True
# %env lazy=False
%env control=M_Fluxnet

M_Sectiony works with ychunk=False local=True lazy=False.

In [3]:
%%time
# 'savefig': do we save the output as html or not? Keep it True.
savefig = True
client, cluster, control, catalog_url, month, year, daskreport, outputpath = load.set_control(host)
!mkdir -p $outputpath
!mkdir -p $daskreport
client
local True
using host= irene4596.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16
10000000000
False
tgcc local cluster starting
This code is running on  irene4596.c-irene.mg1.tgcc.ccc.cea.fr using  SEDNA_DELTA_MONITOR file experiment, read from  ../lib/SEDNA_DELTA_MONITOR.yaml  on year= 2012  on month= 02  outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6419108irene4596.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_02M_IceConce/
CPU times: user 3.74 s, sys: 695 ms, total: 4.43 s
Wall time: 1min 35s
Out[3]:

Client

Client-ed3207ea-13d8-11ed-bf1e-080038b93cdf

Connection method: Cluster object
Cluster type: distributed.LocalCluster
Dashboard: http://127.0.0.1:8787/status

Cluster Info

LocalCluster

ef4afea0

Dashboard: http://127.0.0.1:8787/status
Workers: 64
Total threads: 256
Total memory: 251.06 GiB
Status: running
Using processes: True

Scheduler Info

Scheduler

Scheduler-3e0f7b40-b538-494e-b908-63433439e205

Comm: tcp://127.0.0.1:40282
Workers: 64
Dashboard: http://127.0.0.1:8787/status
Total threads: 256
Started: 1 minute ago
Total memory: 251.06 GiB

Workers

[64 worker entries (Worker 0 … Worker 63) elided; each worker: Total threads: 4, Memory: 3.92 GiB, Comm/Dashboard/Nanny endpoints on 127.0.0.1, Local directory under /tmp/dask-worker-space/]

Read plotting information from a csv file

In [4]:
df = load.controlfile(control)
# Take out 'later'-tagged computations:
# df = df[~df['Value'].str.contains('later')]
df
Out[4]:
Value Inputs Equation Zone Plot Colourmap MinMax Unit Oldname Unnamed: 10
IceConce icemod.siconc (data.siconc.where(data.siconc >0)).to_dataset... ALL maps Blues None M-4

Computation starts here

Each computation consists of the following six steps (see the sketch after this list):

  1. Load NEMO data set
  2. Zoom data set
  3. Compute (or load computed data set)
  4. Save
  5. Plot
  6. Close
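
Schematically, monitor.auto loops over the rows of the control table and applies these six steps to each row; a simplified sketch (function names other than load.datas and save.datas are assumptions, not the real core API):

def run_all(df, savefig):
    for step in df.itertuples():
        filename = 'SEDNA_' + step.Plot + '_' + step.Zone + '_' + step.Value
        data = load.datas(catalog_url, df.Inputs, month, year, daskreport)       # 1. load NEMO data set
        data = zoom.cut(data, zone=step.Zone)                                    # 2. zoom (assumed name)
        data = calc.datas(data, step.Equation)                                   # 3. compute, or load a computed result (assumed name)
        save.datas(data, plot=step.Plot, path=nc_outputpath, filename=filename)  # 4. save
        plots.datas(data, savefig=savefig)                                       # 5. plot (assumed signature)
        del data                                                                 # 6. close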
In [5]:
%%time
import os
# env switches arrive as strings; compare them to the literal 'True'/'False'
calcswitch = os.environ.get('calc', 'True')
lazy = os.environ.get('lazy', 'False')
# load data only if at least one computation in the control table needs inputs
loaddata = (df.Inputs != '').any()
print('calcswitch=', calcswitch, 'df.Inputs != nothing', loaddata, 'lazy=', lazy)
data = load.datas(catalog_url, df.Inputs, month, year, daskreport, lazy=lazy) if (calcswitch == 'True' and loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= True
../lib/SEDNA_DELTA_MONITOR.yaml
using param_xios reading  ../lib/SEDNA_DELTA_MONITOR.yaml
using param_xios reading  <bound method DataSourceBase.describe of sources:
  param_xios:
    args:
      combine: nested
      concat_dim: y
      urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc
      xarray_kwargs:
        compat: override
        coords: minimal
        data_vars: minimal
        parallel: true
    description: SEDNA NEMO parameters from MPI output  nav_lon lat fails
    driver: intake_xarray.netcdf.NetCDFSource
    metadata:
      catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/
>
{'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output  nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}}
0 read icemod ['siconc']
lazy= True
using load_data_xios reading  icemod
using load_data_xios reading  {'name': 'data_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO outputs from different xios server', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'name of config', 'type': 'str', 'default': '/ccc/scratch/cont003/gen7420/talandel/SEDNA/SEDNA-DELTA-S/SPLIT/1d'}, {'name': 'fileexp', 'description': 'name of config', 'type': 'str', 'default': 'SEDNA-DELTA'}, {'name': 'month', 'description': 'running number 2 digit', 'type': 'str', 'default': '02'}, {'name': 'freq', 'description': '1d or 1m', 'type': 'str', 'default': '1d'}, {'name': 'year', 'description': 'last digits of yearmonthdate.', 'type': 'str', 'default': '2012'}, {'name': 'file', 'description': 'file name', 'type': 'str', 'default': 'icemod'}, {'name': 'eio', 'description': 'xios mpi number', 'type': 'str', 'default': '0[0-5][0-9][0-9]'}], 'metadata': {}, 'args': {'urlpath': '{{path}}/{{year}}/{{month}}/*{{file}}_*_{{eio}}.nc', 'combine': 'nested', 'concat_dim': 'time_counter,x,y'}}
      took 250.78706979751587 seconds
0 merging icemod ['siconc']
param nav_lon will be included in data
param nav_lat will be included in data
param mask2d will be included in data
ychunk= 10 calldatas_y_rechunk
sum_num (13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12)
start rechunking with (130, 122, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 48)
end of y_rechunk
CPU times: user 1min 56s, sys: 25.8 s, total: 2min 22s
Wall time: 5min 6s
Out[5]:
<xarray.Dataset>
Dimensions:        (t: 28, y: 6540, x: 6560)
Coordinates:
  * t              (t) object 2012-02-01 12:00:00 ... 2012-02-28 12:00:00
  * y              (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540
  * x              (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560
    nav_lat        (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray>
    nav_lon        (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray>
    time_centered  (t) object dask.array<chunksize=(28,), meta=np.ndarray>
    mask2d         (y, x) bool dask.array<chunksize=(130, 6560), meta=np.ndarray>
Data variables:
    siconc         (t, y, x) float32 dask.array<chunksize=(28, 130, 6560), meta=np.ndarray>
Attributes: (12/26)
    name:                    /ccc/scratch/cont003/ra5563/talandel/ONGOING-RUN...
    description:             ice variables
    title:                   ice variables
    Conventions:             CF-1.6
    timeStamp:               2022-Jan-18 16:51:17 GMT
    uuid:                    56b165e2-bdda-4b33-a2e9-04a59f3d06e9
    ...                      ...
    start_date:              20090101
    output_frequency:        1d
    CONFIG:                  SEDNA
    CASE:                    DELTA
    history:                 Wed Jan 19 12:40:39 2022: ncks -4 -L 1 SEDNA-DEL...
    NCO:                     netCDF Operators version 4.9.1 (Homepage = http:...
[expanded xarray HTML repr elided: it repeats the Dataset summary above, with full cftime coordinate arrays and all 26 global attributes (DOMAIN_* decomposition info, CONFIG: SEDNA, CASE: DELTA, history, NCO)]
In [6]:
%%time
monitor.auto(df, data, savefig, daskreport, outputpath, file_exp='SEDNA')
#calc= True
#save= True
#plot= False
Value='IceConce'
Zone='ALL'
Plot='maps'
cmap='Blues'
clabel=' '
clim= None
outputpath='../results/SEDNA_DELTA_MONITOR/'
nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_maps_ALL_IceConce'
data=monitor.optimize_dataset(data)
#3 Start computing 
data= (data.siconc.where(data.siconc >0)).to_dataset(name='siconc').chunk({ 't': -1 }).unify_chunks().persist() 
monitor.optimize_dataset(data)
add optimise here once otimise can recognise
<xarray.Dataset>
Dimensions:        (t: 28, y: 6540, x: 6560)
Coordinates:
  * t              (t) object 2012-02-01 12:00:00 ... 2012-02-28 12:00:00
  * y              (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540
  * x              (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560
    nav_lat        (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray>
    nav_lon        (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray>
    time_centered  (t) object dask.array<chunksize=(28,), meta=np.ndarray>
    mask2d         (y, x) bool dask.array<chunksize=(130, 6560), meta=np.ndarray>
Data variables:
    siconc         (t, y, x) float32 dask.array<chunksize=(28, 130, 6560), meta=np.ndarray>
[expanded xarray HTML repr elided: same persisted Dataset as the summary above, task counts now 55 per variable]
#4 Saving  SEDNA_maps_ALL_IceConce
data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)
start saving data
saving data in a file
t (28,)
0
slice(0, 28, None)
2022-08-04 11:41:42,803 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:32891 (pid=16438) exceeded 99% memory budget. Restarting...
2022-08-04 11:41:43,029 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:32891
Traceback (most recent call last):
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 867, in _read_to_buffer
    bytes_read = self.read_from_fd(buf)
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1140, in read_from_fd
    return self.socket.recv_into(buf, len(buf))
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect
    handshake = await asyncio.wait_for(comm.read(), time_left())
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 445, in wait_for
    return fut.result()
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read
    convert_stream_closed_error(self, e)
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error
    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:38851 remote=tcp://127.0.0.1:32891>: ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep
    response = await get_data_from_worker(
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker
    return await retry_operation(_get_data, operation="get_data_from_worker")
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation
    return await retry(
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry
    return await coro()
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data
    comm = await rpc.connect(worker)
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect
    return await connect_attempt
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect
    comm = await connect(
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect
    raise OSError(
OSError: Timed out during handshake while connecting to tcp://127.0.0.1:32891 after 30 s
2022-08-04 11:41:43,048 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:41:44,050 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:37442 (pid=16263) exceeded 99% memory budget. Restarting...
2022-08-04 11:41:44,256 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:41:45,082 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.57 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:41:45,212 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:40043 (pid=16356) exceeded 99% memory budget. Restarting...
2022-08-04 11:41:45,480 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:41:46,370 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.69 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:41:46,469 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:43569 (pid=16436) exceeded 99% memory budget. Restarting...
2022-08-04 11:41:46,738 - distributed.nanny - WARNING - Restarting worker
---------------------------------------------------------------------------
KilledWorker                              Traceback (most recent call last)
File <timed eval>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:89, in auto(df, val, savefig, daskreport, outputpath, file_exp)
     87         print('data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)' )
     88         with performance_report(filename=daskreport+"_save_"+step.Value+".html"):
---> 89             save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)                
     90 # 5. Plot       
     91     if plotswitch=='True': 

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:24, in datas(data, plot, path, filename)
     22     twoD(data,path,filename,nested=False)
     23 else :
---> 24     twoD(data,path,filename)
     25 return None

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:58, in twoD(data, path, filename, nested)
     56 print('saving data in a file')
     57 filesave=path+filename  
---> 58 return to_mfnetcdf_map(data,prefix=filesave, nested=nested)

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:229, in to_mfnetcdf_map(ds, prefix, nested)
    223     template=ds.isel(t=i)
    224     mapped=xr.map_blocks(
    225         create_eachfile, template
    226         ,kwargs=dict(prefix=prefix,nested=nested)
    227         ,template=template
    228         )  
--> 229     mapped.compute()
    231 return mapped

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:899, in Dataset.compute(self, **kwargs)
    880 """Manually trigger loading and/or computation of this dataset's data
    881 from disk or a remote source into memory and return a new dataset.
    882 Unlike load, the original dataset is left unaltered.
   (...)
    896 dask.compute
    897 """
    898 new = self.copy(deep=False)
--> 899 return new.load(**kwargs)

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:733, in Dataset.load(self, **kwargs)
    730 import dask.array as da
    732 # evaluate all the dask arrays simultaneously
--> 733 evaluated_data = da.compute(*lazy_data.values(), **kwargs)
    735 for k, data in zip(lazy_data, evaluated_data):
    736     self.variables[k].data = data

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/base.py:598, in compute(traverse, optimize_graph, scheduler, get, *args, **kwargs)
    595     keys.append(x.__dask_keys__())
    596     postcomputes.append(x.__dask_postcompute__())
--> 598 results = schedule(dsk, keys, **kwargs)
    599 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:3001, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)
   2999         should_rejoin = False
   3000 try:
-> 3001     results = self.gather(packed, asynchronous=asynchronous, direct=direct)
   3002 finally:
   3003     for f in futures.values():

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2175, in Client.gather(self, futures, errors, direct, asynchronous)
   2173 else:
   2174     local_worker = None
-> 2175 return self.sync(
   2176     self._gather,
   2177     futures,
   2178     errors=errors,
   2179     direct=direct,
   2180     local_worker=local_worker,
   2181     asynchronous=asynchronous,
   2182 )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:338, in SyncMethodMixin.sync(self, func, asynchronous, callback_timeout, *args, **kwargs)
    336     return future
    337 else:
--> 338     return sync(
    339         self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
    340     )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:405, in sync(loop, func, callback_timeout, *args, **kwargs)
    403 if error:
    404     typ, exc, tb = error
--> 405     raise exc.with_traceback(tb)
    406 else:
    407     return result

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:378, in sync.<locals>.f()
    376         future = asyncio.wait_for(future, callback_timeout)
    377     future = asyncio.ensure_future(future)
--> 378     result = yield future
    379 except Exception:
    380     error = sys.exc_info()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/gen.py:762, in Runner.run(self)
    759 exc_info = None
    761 try:
--> 762     value = future.result()
    763 except Exception:
    764     exc_info = sys.exc_info()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2038, in Client._gather(self, futures, errors, direct, local_worker)
   2036         exc = CancelledError(key)
   2037     else:
-> 2038         raise exception.with_traceback(traceback)
   2039     raise exc
   2040 if errors == "skip":

KilledWorker: ("('open_dataset-getitem-getitem-81df9a53b6a0ff847a49fca70c0bc8bd', 0, 0, 0)", <WorkerState 'tcp://127.0.0.1:43569', name: 26, status: closed, memory: 0, processing: 1>)