In [1]:
%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()

from core import load, zoom, calc, save, plots, monitor
In [2]:
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
Out[2]:
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>

If you submit the job with a job scheduler, the following applies

Below is a list of environment variables one can pass:

%env local='2'

local: if 'True', run a dask local cluster; otherwise the number of workers is taken from the value of 'local'. If 'local' is not given, it defaults to 'True'.
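A minimal sketch of how this switch might be interpreted (an assumption for illustration; the actual logic lives in core/load.py):

    # Hedged sketch, not the actual core/load.py code: interpret the
    # 'local' environment variable when starting the cluster.
    import os
    from dask.distributed import Client, LocalCluster

    local = os.environ.get('local', 'True')      # defaults to 'True' when unset
    if local == 'True':
        cluster = LocalCluster()                 # let dask choose the worker count
    else:
        cluster = LocalCluster(n_workers=int(local))   # e.g. local='2' -> 2 workers
    client = Client(cluster)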

%env ychunk='2'

%env tchunk='2'

These control chunking. 'False' keeps the chunking of the original netCDF files unmodified.

ychunk=10 groups the original netCDF files 10 by 10.

tchunk=1 chunks the time coordinate one by one.
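As a rough sketch (an assumption, not the actual core/load.py implementation), these switches could map onto xarray chunk sizes like this; the factor of 13 is the y-size of one MPI output file in this run (nj: 13 in the dataset attributes):

    import os
    import xarray as xr

    ychunk = os.environ.get('ychunk', 'False')
    tchunk = os.environ.get('tchunk', 'False')
    chunks = {}
    if ychunk != 'False':
        chunks['y'] = int(ychunk) * 13            # group e.g. 10 files along y
    if tchunk != 'False':
        chunks['time_counter'] = int(tchunk)      # e.g. 1 -> one time step per chunk
    # an empty dict keeps the chunking of the original netCDF files
    ds = xr.open_mfdataset('x_*.nc', chunks=chunks or None, parallel=True)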

%env file_exp=

'file_exp': the name of the 'experiment'. This corresponds to the intake catalog name, without the path and the .yaml extension.

%env year=

For validation, this corresponds to the year in path/year/month.

For monitoring, this corresponds to 'date'; a value containing * means process all files in the monitoring directory.

Setting it to 0[0-9], 1[0-9] and *[2-3][0-9] splits the job into three lots, as illustrated below.
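For example, the three glob patterns partition two-digit dates like this (a small self-contained illustration using fnmatch):

    import fnmatch
    dates = [f"{d:02d}" for d in range(1, 32)]               # days 01..31
    for pattern in ["0[0-9]", "1[0-9]", "*[2-3][0-9]"]:
        lot = [d for d in dates if fnmatch.fnmatch(d, pattern)]
        print(pattern, "->", lot)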

%env month=

For monitoring, this corresponds to the file path path-XIOS.{month}/.


%env control=FWC_SSH

Name of the control file used for computation/plots/saving, and how it is called from Monitor.sh.

Monitor.sh calls M_MLD_2D, and AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh and Integrals.sh call:

  • AWTD.sh: M_AWTMD
  • Fluxnet.sh: M_Fluxnet
  • Siconc.sh: M_Ice_quantities
  • IceClim.sh: M_IceClim M_IceConce M_IceThick
  • FWC_SSH.sh: M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
  • Integrals.sh: M_Mean_temp_velo M_Mooring M_Sectionx M_Sectiony

%env save= : proceed with saving? True or False. Default is True.

%env plot= : proceed with plotting? True or False. Default is True.

%env calc= : proceed with the computation, or just load the computed result? True or False. Default is True.

%env save=False

%env lazy=False

For debugging, this cell can help:

%env file_exp=SEDNA_DELTA_MONITOR
%env year=2012
%env month=01    # or 0[1-2]
%env ychunk=10
%env ychunk=False
%env save=False
%env plot=True
%env calc=True
# %env lazy=False
%env control=M_Fluxnet

M_Sectiony is OK with ychunk=False local=True lazy=False

In [3]:
%%time
# 'savefig': save the output as html? Keep it True.
savefig=True
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
!mkdir -p $outputpath
!mkdir -p $daskreport
client
local True
using host= irene4874.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16
10000000000
False
tgcc local cluster starting
This code is running on  irene4874.c-irene.mg1.tgcc.ccc.cea.fr using  SEDNA_DELTA_MONITOR file experiment, read from  ../lib/SEDNA_DELTA_MONITOR.yaml  on year= 2012  on month= 01  outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6419116irene4874.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_01M_Ice_quantities/
CPU times: user 3.96 s, sys: 711 ms, total: 4.67 s
Wall time: 1min 42s
Out[3]:

Client: Client-f2f80921-13d8-11ed-82a3-080038b93b1f
    Connection method: Cluster object    Cluster type: distributed.LocalCluster
    Dashboard: http://127.0.0.1:8787/status

LocalCluster: e92ea649
    Workers: 64    Total threads: 256    Total memory: 251.06 GiB
    Status: running    Using processes: True

Scheduler: Scheduler-771f0638-ff62-42d3-9936-c5642b971cfd
    Comm: tcp://127.0.0.1:36099    Dashboard: http://127.0.0.1:8787/status
    Started: 1 minute ago

Workers

(Worker 0 through Worker 63: each worker has Total threads: 4 and Memory: 3.92 GiB, with its own comm, dashboard and nanny ports on 127.0.0.1 and a local directory under /tmp/dask-worker-space/; the 64 per-worker entries are omitted here.)

Read plotting information from a CSV file

In [4]:
df=load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
df
Out[4]:
Value: Ice_quantities
Inputs: param.e1te2t, icemod.sivelo, icemod.sivolu, icemo...
Equation: calc.Ice_quant(data)
Zone: ALL
Plot: Ice_intquant
Colourmap: None
MinMax: (0,20)
Unit: cm s^(-1)
Oldname: I-2
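Given the heading above, load.controlfile presumably just reads this control file into a pandas DataFrame; a hedged sketch (the path convention and the fillna handling are assumptions):

    import pandas as pd

    def controlfile(control, libdir='../lib/'):
        # one row per computation: Value, Inputs, Equation, Zone, Plot,
        # Colourmap, MinMax, Unit, Oldname
        return pd.read_csv(libdir + control + '.csv').fillna('')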

Computation starts here

Each computation consists of the six steps below; a simplified sketch of the loop follows the list.

  1. Load NEMO data set
  2. Zoom data set
  3. Compute (or load computed data set)
  4. Save
  5. Plot
  6. Close
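A hedged sketch of that loop (the real code is in core/monitor.py; auto_sketch is a hypothetical name, and the eval of the Equation column is inferred from the printed `data= calc.Ice_quant(data)` line in the log below):

    from core import calc, save, monitor

    def auto_sketch(df, data, savefig, daskreport, outputpath, file_exp='SEDNA'):
        for step in df.itertuples():
            filename = f"{file_exp}_{step.Plot}_{step.Zone}_{step.Value}"
            data = monitor.optimize_dataset(data)     # as printed in the log below
            data = eval(step.Equation)                # 3. Compute, e.g. 'calc.Ice_quant(data)'
            save.datas(data, plot=step.Plot,
                       path=outputpath, filename=filename)   # 4. Save
            # 2. Zoom, 5. Plot and 6. Close are omitted in this sketch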
In [5]:
%%time
import os
calcswitch=os.environ.get('calc', 'True')
lazy=os.environ.get('lazy', 'False')
loaddata=(df.Inputs != '').any()
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata,'lazy=',lazy)
data = load.datas(catalog_url,df.Inputs,month,year,daskreport,lazy=lazy) if (calcswitch=='True' and loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= True
../lib/SEDNA_DELTA_MONITOR.yaml
using param_xios reading  ../lib/SEDNA_DELTA_MONITOR.yaml
using param_xios reading  <bound method DataSourceBase.describe of sources:
  param_xios:
    args:
      combine: nested
      concat_dim: y
      urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc
      xarray_kwargs:
        compat: override
        coords: minimal
        data_vars: minimal
        parallel: true
    description: SEDNA NEMO parameters from MPI output  nav_lon lat fails
    driver: intake_xarray.netcdf.NetCDFSource
    metadata:
      catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/
>
{'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output  nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}}
0 read icemod ['sivolu', 'siconc', 'sivelo']
lazy= True
using load_data_xios reading  icemod
using load_data_xios reading  {'name': 'data_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO outputs from different xios server', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'name of config', 'type': 'str', 'default': '/ccc/scratch/cont003/gen7420/talandel/SEDNA/SEDNA-DELTA-S/SPLIT/1d'}, {'name': 'fileexp', 'description': 'name of config', 'type': 'str', 'default': 'SEDNA-DELTA'}, {'name': 'month', 'description': 'running number 2 digit', 'type': 'str', 'default': '02'}, {'name': 'freq', 'description': '1d or 1m', 'type': 'str', 'default': '1d'}, {'name': 'year', 'description': 'last digits of yearmonthdate.', 'type': 'str', 'default': '2012'}, {'name': 'file', 'description': 'file name', 'type': 'str', 'default': 'icemod'}, {'name': 'eio', 'description': 'xios mpi number', 'type': 'str', 'default': '0[0-5][0-9][0-9]'}], 'metadata': {}, 'args': {'urlpath': '{{path}}/{{year}}/{{month}}/*{{file}}_*_{{eio}}.nc', 'combine': 'nested', 'concat_dim': 'time_counter,x,y'}}
      took 267.37703800201416 seconds
0 merging icemod ['sivolu', 'siconc', 'sivelo']
param mask2d will be included in data
param nav_lat will be included in data
param nav_lon will be included in data
param e1te2t will be included in data
CPU times: user 1min 58s, sys: 27.9 s, total: 2min 26s
Wall time: 5min 26s
Out[5]:
<xarray.Dataset>
Dimensions:        (t: 31, y: 6540, x: 6560)
Coordinates:
  * t              (t) object 2012-01-01 12:00:00 ... 2012-01-31 12:00:00
  * y              (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540
  * x              (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560
    nav_lat        (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray>
    nav_lon        (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray>
    time_centered  (t) object dask.array<chunksize=(31,), meta=np.ndarray>
    mask2d         (y, x) bool dask.array<chunksize=(13, 6560), meta=np.ndarray>
    e1te2t         (y, x) float64 dask.array<chunksize=(13, 6560), meta=np.ndarray>
Data variables:
    sivolu         (t, y, x) float32 dask.array<chunksize=(31, 6540, 6560), meta=np.ndarray>
    siconc         (t, y, x) float32 dask.array<chunksize=(31, 6540, 6560), meta=np.ndarray>
    sivelo         (t, y, x) float32 dask.array<chunksize=(31, 6540, 6560), meta=np.ndarray>
Attributes: (12/26)
    name:                    /ccc/scratch/cont003/ra5563/talandel/ONGOING-RUN...
    description:             ice variables
    title:                   ice variables
    Conventions:             CF-1.6
    timeStamp:               2022-Jan-17 19:00:05 GMT
    uuid:                    65f78891-6a37-4a91-8ad4-7c8b5dc0d456
    ...                      ...
    start_date:              20090101
    output_frequency:        1d
    CONFIG:                  SEDNA
    CASE:                    DELTA
    history:                 Tue Jan 18 17:20:08 2022: ncks -4 -L 1 SEDNA-DEL...
    NCO:                     netCDF Operators version 4.9.1 (Homepage = http:...
(expanded xarray HTML repr omitted; beyond the attributes shown above it lists the MPI domain decomposition: ibegin 0, ni 6560, jbegin 0, nj 13, DOMAIN_number_total 544, DOMAIN_size_global [6560 6540], DOMAIN_type box)
In [6]:
%%time
monitor.auto(df,data,savefig,daskreport,outputpath,file_exp='SEDNA')
#calc= True
#save= True
#plot= False
Value='Ice_quantities'
Zone='ALL'
Plot='Ice_intquant'
cmap='None'
clabel='cm s^(-1)'
clim= (0, 20)
outputpath='../results/SEDNA_DELTA_MONITOR/'
nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_Ice_intquant_ALL_Ice_quantities'
data=monitor.optimize_dataset(data)
#3 Start computing 
data= calc.Ice_quant(data)
monitor.optimize_dataset(data)
add optimise here once optimise can recognise
<xarray.Dataset>
Dimensions:        (t: 31)
Coordinates:
  * t              (t) object 2012-01-01 12:00:00 ... 2012-01-31 12:00:00
    time_centered  (t) object dask.array<chunksize=(31,), meta=np.ndarray>
Data variables:
    Ice volume     (t) float64 dask.array<chunksize=(31,), meta=np.ndarray>
    Ice area       (t) float64 dask.array<chunksize=(31,), meta=np.ndarray>
    Ice extent     (t) float64 dask.array<chunksize=(31,), meta=np.ndarray>
    Ice drift      (t) float64 dask.array<chunksize=(31,), meta=np.ndarray>
#4 Saving  SEDNA_Ice_intquant_ALL_Ice_quantities
data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)
start saving data
saving data in a netcdf file ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_Ice_intquant_ALL_Ice_quantities2012-01-01_2012-01-31.nc
2022-08-04 11:43:11,203 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:33002 (pid=164740) exceeded 99% memory budget. Restarting...
2022-08-04 11:43:11,423 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.69 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:43:11,465 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:43:11,555 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:40686 (pid=164781) exceeded 99% memory budget. Restarting...
2022-08-04 11:43:11,778 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:43:12,156 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.70 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:43:12,258 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39274 (pid=164791) exceeded 99% memory budget. Restarting...
2022-08-04 11:43:12,541 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:43:12,791 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.74 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:43:12,836 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39023 (pid=164611) exceeded 99% memory budget. Restarting...
2022-08-04 11:43:13,059 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:43:13,353 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:34665 (pid=164749) exceeded 99% memory budget. Restarting...
2022-08-04 11:43:13,595 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:43:14,002 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:35340 (pid=164730) exceeded 99% memory budget. Restarting...
2022-08-04 11:43:14,208 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:43:14,284 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:35398 (pid=164616) exceeded 99% memory budget. Restarting...
2022-08-04 11:43:14,662 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:43:15,183 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.68 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:43:15,315 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:35917 (pid=164625) exceeded 99% memory budget. Restarting...
2022-08-04 11:43:16,041 - distributed.nanny - WARNING - Restarting worker
---------------------------------------------------------------------------
KilledWorker                              Traceback (most recent call last)
File <timed eval>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:89, in auto(df, val, savefig, daskreport, outputpath, file_exp)
     87         print('data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)' )
     88         with performance_report(filename=daskreport+"_save_"+step.Value+".html"):
---> 89             save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)                
     90 # 5. Plot       
     91     if plotswitch=='True': 

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:16, in datas(data, plot, path, filename)
     14 print('start saving data' )
     15 if 'int' in plot:
---> 16     savedfile=integral(data,path,filename)
     17     print('save computed data at',savedfile,'completed')  
     18 elif 'Mooring' in plot:

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:87, in integral(data, path, filename)
     85 print('saving data in a  csv file',filesave)
     86 #data[filename]=data
---> 87 data.to_netcdf(filesave,mode='w') 
     88 return filesave

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:1882, in Dataset.to_netcdf(self, path, mode, format, group, engine, encoding, unlimited_dims, compute, invalid_netcdf)
   1879     encoding = {}
   1880 from ..backends.api import to_netcdf
-> 1882 return to_netcdf(  # type: ignore  # mypy cannot resolve the overloads:(
   1883     self,
   1884     path,
   1885     mode=mode,
   1886     format=format,
   1887     group=group,
   1888     engine=engine,
   1889     encoding=encoding,
   1890     unlimited_dims=unlimited_dims,
   1891     compute=compute,
   1892     multifile=False,
   1893     invalid_netcdf=invalid_netcdf,
   1894 )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:1219, in to_netcdf(dataset, path_or_file, mode, format, group, engine, encoding, unlimited_dims, compute, multifile, invalid_netcdf)
   1216 if multifile:
   1217     return writer, store
-> 1219 writes = writer.sync(compute=compute)
   1221 if isinstance(target, BytesIO):
   1222     store.sync()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/common.py:168, in ArrayWriter.sync(self, compute)
    162 import dask.array as da
    164 # TODO: consider wrapping targets with dask.delayed, if this makes
    165 # for any discernible difference in perforance, e.g.,
    166 # targets = [dask.delayed(t) for t in self.targets]
--> 168 delayed_store = da.store(
    169     self.sources,
    170     self.targets,
    171     lock=self.lock,
    172     compute=compute,
    173     flush=True,
    174     regions=self.regions,
    175 )
    176 self.sources = []
    177 self.targets = []

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/core.py:1229, in store(***failed resolving arguments***)
   1227 elif compute:
   1228     store_dsk = HighLevelGraph(layers, dependencies)
-> 1229     compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
   1230     return None
   1232 else:

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/base.py:342, in compute_as_if_collection(cls, dsk, keys, scheduler, get, **kwargs)
    340 schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)
    341 dsk2 = optimization_function(cls)(dsk, keys, **kwargs)
--> 342 return schedule(dsk2, keys, **kwargs)

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:3001, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)
   2999         should_rejoin = False
   3000 try:
-> 3001     results = self.gather(packed, asynchronous=asynchronous, direct=direct)
   3002 finally:
   3003     for f in futures.values():

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2175, in Client.gather(self, futures, errors, direct, asynchronous)
   2173 else:
   2174     local_worker = None
-> 2175 return self.sync(
   2176     self._gather,
   2177     futures,
   2178     errors=errors,
   2179     direct=direct,
   2180     local_worker=local_worker,
   2181     asynchronous=asynchronous,
   2182 )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:338, in SyncMethodMixin.sync(self, func, asynchronous, callback_timeout, *args, **kwargs)
    336     return future
    337 else:
--> 338     return sync(
    339         self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
    340     )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:405, in sync(loop, func, callback_timeout, *args, **kwargs)
    403 if error:
    404     typ, exc, tb = error
--> 405     raise exc.with_traceback(tb)
    406 else:
    407     return result

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:378, in sync.<locals>.f()
    376         future = asyncio.wait_for(future, callback_timeout)
    377     future = asyncio.ensure_future(future)
--> 378     result = yield future
    379 except Exception:
    380     error = sys.exc_info()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/gen.py:762, in Runner.run(self)
    759 exc_info = None
    761 try:
--> 762     value = future.result()
    763 except Exception:
    764     exc_info = sys.exc_info()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2038, in Client._gather(self, futures, errors, direct, local_worker)
   2036         exc = CancelledError(key)
   2037     else:
-> 2038         raise exception.with_traceback(traceback)
   2039     raise exc
   2040 if errors == "skip":

KilledWorker: ("('open_dataset-getitem-getitem-getitem-f611aea20f9738d6721b37c6c7a61da4', 0, 0, 0)", <WorkerState 'tcp://127.0.0.1:35398', name: 26, status: closed, memory: 0, processing: 2>)
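The KilledWorker follows from the chunk layout shown in Out[5]: each ice variable is a single (31, 6540, 6560) chunk of 4.95 GiB, larger than the 3.92 GiB per-worker limit, so workers die while reducing it. One hedged workaround, in the spirit of the ychunk/tchunk switches described at the top, is to split the chunks before computing (sizes here are illustrative only):

    # any split that keeps chunks well under the 3.92 GiB worker limit should do
    data = data.chunk({'t': 1, 'y': 654})   # ~16 MiB per float32 chunk
    data = calc.Ice_quant(data)             # then recompute the integrals lazily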