In [1]:
%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()

from core import load, zoom, calc, save, plots, monitor
In [2]:
# reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
Out[2]:
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>

If you submit the job with a job scheduler, see above

Below is the list of environment variables one can pass.

%env local='2'

local: if 'True', run a dask local cluster; otherwise the value is taken as the number of workers to start. If no 'local' is given, it defaults to 'True'.
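
As a rough sketch (names and defaults here are illustrative, not the actual core/load.py logic), the switch could drive cluster creation like this:

    # Illustrative sketch only -- the real logic lives in core/load.py.
    import os
    from dask.distributed import Client, LocalCluster

    local = os.environ.get('local', 'True')  # missing 'local' falls back to 'True'
    if local == 'True':
        cluster = LocalCluster()                      # let dask pick the worker count
    else:
        cluster = LocalCluster(n_workers=int(local))  # e.g. local=2 -> two workers
    client = Client(cluster)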

%env ychunk='2'

%env tchunk='2'

These control chunking. 'False' means no modification of the original netcdf file's chunks.

ychunk=10 groups the original netcdf files ten by ten along y.

tchunk=1 chunks the time coordinate one step at a time.
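
For illustration, the same effect in plain xarray (the file name is hypothetical; the real rechunking happens inside core/load.py):

    # Open lazily with dask; these chunk sizes mimic ychunk=10, tchunk=1.
    import xarray as xr

    ds = xr.open_dataset('icemod_example.nc',
                         chunks={'y': 10, 'time_counter': 1})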

%env file_exp=

'file_exp': the 'experiment' name. This corresponds to the intake catalog name, without the path and the .yaml extension.

%env year=

For validation, this corresponds to the year in path/year/month.

For monitoring, this corresponds to the 'date'; a * means process all files in the monitoring directory.

Setting it to 0[0-9], 1[0-9] and *[2-3][0-9] splits the job into three lots.
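
For example (hypothetical file layout), each lot is just a glob over the date part of the file names:

    # Hypothetical layout: one submission per pattern covers the whole month.
    from glob import glob

    for pat in ['0[0-9]', '1[0-9]', '*[2-3][0-9]']:
        files = glob(f'path-XIOS.02/*_{pat}.nc')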

%env month=

For monitoring, this corresponds to the file path path-XIOS.{month}/.

%env control=FWC_SSH

Name of the control file to be used for computation/plots/save, and how it is called from Monitor.sh.

Monitor.sh calls M_MLD_2D.

AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh and Integrals.sh call:

  • AWTD.sh: M_AWTMD
  • Fluxnet.sh: M_Fluxnet
  • Siconc.sh: M_Ice_quantities
  • IceClim.sh: M_IceClim M_IceConce M_IceThick
  • FWC_SSH.sh: M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
  • Integrals.sh: M_Mean_temp_velo M_Mooring M_Sectionx M_Sectiony

%env save= : proceed with saving? True or False. Default is True.

%env plot= : proceed with plotting? True or False. Default is True.

%env calc= : proceed with the computation, or just load a computed result? True or False. Default is True.

%env save=False

%env lazy=False

For debugging, this cell can help:

%env file_exp=SEDNA_DELTA_MONITOR
%env year=2012
%env month=01 (or a pattern such as 0[1-2])
%env ychunk=10 (or ychunk=False)
%env save=False
%env plot=True
%env calc=True
# %env lazy=False

%env control=M_Fluxnet

M_Sectiony is OK with ychunk=False, local=True, lazy=False.

In [3]:
%%time
# 'savefig': do we save output plots as html or not? Keep it True.
savefig=True
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
!mkdir -p $outputpath
!mkdir -p $daskreport
client
local True
using host= irene4875.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16
10000000000
False
tgcc local cluster starting
This code is running on  irene4875.c-irene.mg1.tgcc.ccc.cea.fr using  SEDNA_DELTA_MONITOR file experiment, read from  ../lib/SEDNA_DELTA_MONITOR.yaml  on year= 2012  on month= 02  outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6419117irene4875.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_02M_Ice_quantities/
CPU times: user 3.73 s, sys: 754 ms, total: 4.49 s
Wall time: 1min 37s
Out[3]:

Client

Client-eef84e02-13d8-11ed-a0f9-080038b93b95

Connection method: Cluster object Cluster type: distributed.LocalCluster
Dashboard: http://127.0.0.1:8787/status

Cluster Info

LocalCluster

615d8441

Dashboard: http://127.0.0.1:8787/status Workers: 64
Total threads: 256 Total memory: 251.06 GiB
Status: running Using processes: True

Scheduler Info

Scheduler

Scheduler-8a95abe9-46fc-408f-8510-75c7fcb1d6f5

Comm: tcp://127.0.0.1:39619 Workers: 64
Dashboard: http://127.0.0.1:8787/status Total threads: 256
Started: 1 minute ago Total memory: 251.06 GiB

Workers

Worker: 0

Comm: tcp://127.0.0.1:33959 Total threads: 4
Dashboard: http://127.0.0.1:34963/status Memory: 3.92 GiB
Nanny: tcp://127.0.0.1:41896
Local directory: /tmp/dask-worker-space/worker-spentur8

(Workers 1-63 omitted: each is configured identically to Worker 0 — 4 threads, 3.92 GiB memory, a nanny, and a local directory under /tmp/dask-worker-space/ — differing only in ports.)

Read plotting information from a csv file

In [4]:
df=load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
df
Out[4]:
Value: Ice_quantities
Inputs: param.e1te2t, icemod.sivelo, icemod.sivolu, icemo...
Equation: calc.Ice_quant(data)
Zone: ALL
Plot: Ice_intquant
Colourmap: None
MinMax: (0,20)
Unit: cm s^(-1)
Oldname: I-2
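
load.controlfile presumably wraps a plain pandas read keyed by the control name; a minimal sketch under that assumption (path and file format are guesses):

    # Assumption: one csv of computation/plotting rows per control name.
    import pandas as pd

    control = 'M_Ice_quantities'                          # value of %env control
    df = pd.read_csv(f'../lib/{control}.csv').fillna('')  # hypothetical path
    df = df[~df['Value'].str.contains('later')]           # drop 'later'-tagged rows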

Computation starts here

Each computation consists of the following steps (a schematic sketch follows the list):

  1. Load NEMO data set
  2. Zoom data set
  3. Compute (or load computed data set)
  4. Save
  5. Plot
  6. Close
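
Schematically, monitor.auto walks the control-file rows through those six steps. A sketch using the calls visible in this notebook (the zoom and plot names are hypothetical; the real loop is in core/monitor.py):

    # Six steps per control-file row; uses the modules imported in cell 1.
    for step in df.itertuples():
        filename = f'SEDNA_{step.Plot}_{step.Zone}_{step.Value}'  # as printed below
        ds = load.datas(catalog_url, step.Inputs, month, year, daskreport)  # 1. load
        ds = zoom.zoom_data(ds, step.Zone)       # 2. zoom (hypothetical name)
        ds = calc.Ice_quant(ds)                  # 3. compute, or load a saved result
        save.datas(ds, plot=step.Plot, path=outputpath, filename=filename)  # 4. save
        plots.plot_data(ds, savefig)             # 5. plot (hypothetical name)
        del ds                                   # 6. close / free memory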
In [5]:
%%time
import os
calcswitch = os.environ.get('calc', 'True')
lazy = os.environ.get('lazy', 'False')
loaddata = (df.Inputs != '').any()  # True if any control-file row needs input data
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata, 'lazy=',lazy)
data = load.datas(catalog_url,df.Inputs,month,year,daskreport,lazy=lazy) if (calcswitch=='True' and loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= True
../lib/SEDNA_DELTA_MONITOR.yaml
using param_xios reading  ../lib/SEDNA_DELTA_MONITOR.yaml
using param_xios reading  <bound method DataSourceBase.describe of sources:
  param_xios:
    args:
      combine: nested
      concat_dim: y
      urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc
      xarray_kwargs:
        compat: override
        coords: minimal
        data_vars: minimal
        parallel: true
    description: SEDNA NEMO parameters from MPI output  nav_lon lat fails
    driver: intake_xarray.netcdf.NetCDFSource
    metadata:
      catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/
>
{'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output  nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}}
0 read icemod ['sivolu', 'siconc', 'sivelo']
lazy= True
using load_data_xios reading  icemod
using load_data_xios reading  {'name': 'data_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO outputs from different xios server', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'name of config', 'type': 'str', 'default': '/ccc/scratch/cont003/gen7420/talandel/SEDNA/SEDNA-DELTA-S/SPLIT/1d'}, {'name': 'fileexp', 'description': 'name of config', 'type': 'str', 'default': 'SEDNA-DELTA'}, {'name': 'month', 'description': 'running number 2 digit', 'type': 'str', 'default': '02'}, {'name': 'freq', 'description': '1d or 1m', 'type': 'str', 'default': '1d'}, {'name': 'year', 'description': 'last digits of yearmonthdate.', 'type': 'str', 'default': '2012'}, {'name': 'file', 'description': 'file name', 'type': 'str', 'default': 'icemod'}, {'name': 'eio', 'description': 'xios mpi number', 'type': 'str', 'default': '0[0-5][0-9][0-9]'}], 'metadata': {}, 'args': {'urlpath': '{{path}}/{{year}}/{{month}}/*{{file}}_*_{{eio}}.nc', 'combine': 'nested', 'concat_dim': 'time_counter,x,y'}}
      took 256.9019663333893 seconds
0 merging icemod ['sivolu', 'siconc', 'sivelo']
param nav_lon will be included in data
param mask2d will be included in data
param e1te2t will be included in data
param nav_lat will be included in data
CPU times: user 1min 57s, sys: 26.2 s, total: 2min 23s
Wall time: 5min 14s
Out[5]:
<xarray.Dataset>
Dimensions:        (t: 28, y: 6540, x: 6560)
Coordinates:
  * t              (t) object 2012-02-01 12:00:00 ... 2012-02-28 12:00:00
  * y              (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540
  * x              (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560
    nav_lat        (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray>
    nav_lon        (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray>
    time_centered  (t) object dask.array<chunksize=(28,), meta=np.ndarray>
    mask2d         (y, x) bool dask.array<chunksize=(13, 6560), meta=np.ndarray>
    e1te2t         (y, x) float64 dask.array<chunksize=(13, 6560), meta=np.ndarray>
Data variables:
    sivolu         (t, y, x) float32 dask.array<chunksize=(28, 6540, 6560), meta=np.ndarray>
    siconc         (t, y, x) float32 dask.array<chunksize=(28, 6540, 6560), meta=np.ndarray>
    sivelo         (t, y, x) float32 dask.array<chunksize=(28, 6540, 6560), meta=np.ndarray>
Attributes: (12/26)
    name:                    /ccc/scratch/cont003/ra5563/talandel/ONGOING-RUN...
    description:             ice variables
    title:                   ice variables
    Conventions:             CF-1.6
    timeStamp:               2022-Jan-18 16:51:17 GMT
    uuid:                    56b165e2-bdda-4b33-a2e9-04a59f3d06e9
    ...                      ...
    start_date:              20090101
    output_frequency:        1d
    CONFIG:                  SEDNA
    CASE:                    DELTA
    history:                 Wed Jan 19 12:40:39 2022: ncks -4 -L 1 SEDNA-DEL...
    NCO:                     netCDF Operators version 4.9.1 (Homepage = http:...
(Expanded xarray HTML repr omitted — it duplicates the summary above. The full attributes record the SEDNA/DELTA configuration, a 544-subdomain MPI decomposition, daily (1d) output starting 20090101, and the ncks/ncrcat post-processing history.)
In [6]:
%%time
monitor.auto(df,data,savefig,daskreport,outputpath,file_exp='SEDNA'
            )
#calc= True
#save= True
#plot= False
Value='Ice_quantities'
Zone='ALL'
Plot='Ice_intquant'
cmap='None'
clabel='cm s^(-1)'
clim= (0, 20)
outputpath='../results/SEDNA_DELTA_MONITOR/'
nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_Ice_intquant_ALL_Ice_quantities'
data=monitor.optimize_dataset(data)
#3 Start computing 
data= calc.Ice_quant(data)
monitor.optimize_dataset(data)
add optimise here once otimise can recognise
<xarray.Dataset>
Dimensions:        (t: 28)
Coordinates:
  * t              (t) object 2012-02-01 12:00:00 ... 2012-02-28 12:00:00
    time_centered  (t) object dask.array<chunksize=(28,), meta=np.ndarray>
Data variables:
    Ice volume     (t) float64 dask.array<chunksize=(28,), meta=np.ndarray>
    Ice area       (t) float64 dask.array<chunksize=(28,), meta=np.ndarray>
    Ice extent     (t) float64 dask.array<chunksize=(28,), meta=np.ndarray>
    Ice drift      (t) float64 dask.array<chunksize=(28,), meta=np.ndarray>
(Expanded xarray HTML repr omitted — it duplicates the summary above: four daily time series, each 28 float64 values in a single dask chunk.)
#4 Saving  SEDNA_Ice_intquant_ALL_Ice_quantities
data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)
start saving data
saving data in a  csv file ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_Ice_intquant_ALL_Ice_quantities2012-02-01_2012-02-28.nc
2022-08-04 11:42:49,740 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.71 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:42:49,833 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:32962 (pid=221611) exceeded 99% memory budget. Restarting...
2022-08-04 11:42:50,059 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:42:50,136 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39897 (pid=221698) exceeded 99% memory budget. Restarting...
2022-08-04 11:42:50,403 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:42:50,667 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39240 (pid=221538) exceeded 99% memory budget. Restarting...
2022-08-04 11:42:50,896 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:42:51,337 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.69 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:42:51,438 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39199 (pid=221668) exceeded 99% memory budget. Restarting...
2022-08-04 11:42:51,587 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:34301 (pid=221651) exceeded 99% memory budget. Restarting...
2022-08-04 11:42:51,678 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:42:51,814 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:42:52,371 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:35316 (pid=221554) exceeded 99% memory budget. Restarting...
2022-08-04 11:42:52,630 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:42:52,650 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:34801 (pid=221709) exceeded 99% memory budget. Restarting...
2022-08-04 11:42:53,543 - distributed.nanny - WARNING - Restarting worker
---------------------------------------------------------------------------
KilledWorker                              Traceback (most recent call last)
File <timed eval>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:89, in auto(df, val, savefig, daskreport, outputpath, file_exp)
     87         print('data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)' )
     88         with performance_report(filename=daskreport+"_save_"+step.Value+".html"):
---> 89             save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)                
     90 # 5. Plot       
     91     if plotswitch=='True': 

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:16, in datas(data, plot, path, filename)
     14 print('start saving data' )
     15 if 'int' in plot:
---> 16     savedfile=integral(data,path,filename)
     17     print('save computed data at',savedfile,'completed')  
     18 elif 'Mooring' in plot:

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:87, in integral(data, path, filename)
     85 print('saving data in a  csv file',filesave)
     86 #data[filename]=data
---> 87 data.to_netcdf(filesave,mode='w') 
     88 return filesave

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:1882, in Dataset.to_netcdf(self, path, mode, format, group, engine, encoding, unlimited_dims, compute, invalid_netcdf)
   1879     encoding = {}
   1880 from ..backends.api import to_netcdf
-> 1882 return to_netcdf(  # type: ignore  # mypy cannot resolve the overloads:(
   1883     self,
   1884     path,
   1885     mode=mode,
   1886     format=format,
   1887     group=group,
   1888     engine=engine,
   1889     encoding=encoding,
   1890     unlimited_dims=unlimited_dims,
   1891     compute=compute,
   1892     multifile=False,
   1893     invalid_netcdf=invalid_netcdf,
   1894 )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:1219, in to_netcdf(dataset, path_or_file, mode, format, group, engine, encoding, unlimited_dims, compute, multifile, invalid_netcdf)
   1216 if multifile:
   1217     return writer, store
-> 1219 writes = writer.sync(compute=compute)
   1221 if isinstance(target, BytesIO):
   1222     store.sync()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/common.py:168, in ArrayWriter.sync(self, compute)
    162 import dask.array as da
    164 # TODO: consider wrapping targets with dask.delayed, if this makes
    165 # for any discernible difference in perforance, e.g.,
    166 # targets = [dask.delayed(t) for t in self.targets]
--> 168 delayed_store = da.store(
    169     self.sources,
    170     self.targets,
    171     lock=self.lock,
    172     compute=compute,
    173     flush=True,
    174     regions=self.regions,
    175 )
    176 self.sources = []
    177 self.targets = []

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/core.py:1229, in store(***failed resolving arguments***)
   1227 elif compute:
   1228     store_dsk = HighLevelGraph(layers, dependencies)
-> 1229     compute_as_if_collection(Array, store_dsk, map_keys, **kwargs)
   1230     return None
   1232 else:

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/base.py:342, in compute_as_if_collection(cls, dsk, keys, scheduler, get, **kwargs)
    340 schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get)
    341 dsk2 = optimization_function(cls)(dsk, keys, **kwargs)
--> 342 return schedule(dsk2, keys, **kwargs)

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:3001, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)
   2999         should_rejoin = False
   3000 try:
-> 3001     results = self.gather(packed, asynchronous=asynchronous, direct=direct)
   3002 finally:
   3003     for f in futures.values():

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2175, in Client.gather(self, futures, errors, direct, asynchronous)
   2173 else:
   2174     local_worker = None
-> 2175 return self.sync(
   2176     self._gather,
   2177     futures,
   2178     errors=errors,
   2179     direct=direct,
   2180     local_worker=local_worker,
   2181     asynchronous=asynchronous,
   2182 )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:338, in SyncMethodMixin.sync(self, func, asynchronous, callback_timeout, *args, **kwargs)
    336     return future
    337 else:
--> 338     return sync(
    339         self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
    340     )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:405, in sync(loop, func, callback_timeout, *args, **kwargs)
    403 if error:
    404     typ, exc, tb = error
--> 405     raise exc.with_traceback(tb)
    406 else:
    407     return result

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:378, in sync.<locals>.f()
    376         future = asyncio.wait_for(future, callback_timeout)
    377     future = asyncio.ensure_future(future)
--> 378     result = yield future
    379 except Exception:
    380     error = sys.exc_info()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/gen.py:762, in Runner.run(self)
    759 exc_info = None
    761 try:
--> 762     value = future.result()
    763 except Exception:
    764     exc_info = sys.exc_info()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2038, in Client._gather(self, futures, errors, direct, local_worker)
   2036         exc = CancelledError(key)
   2037     else:
-> 2038         raise exception.with_traceback(traceback)
   2039     raise exc
   2040 if errors == "skip":

KilledWorker: ("('open_dataset-getitem-getitem-getitem-81df9a53b6a0ff847a49fca70c0bc8bd', 0, 0, 0)", <WorkerState 'tcp://127.0.0.1:35316', name: 40, status: closed, memory: 0, processing: 2>)