In [1]:
%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()

from core import load, zoom, calc, save, plots, monitor
In [2]:
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
Out[2]:
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>

If you submit the job with a job scheduler, the following applies

Below is a list of the environment variables one can pass.

%env local='2'

local: if 'True', run a local dask cluster; otherwise run with the number of workers set in 'local'. If 'local' is not given, it defaults to 'True'.

%env ychunk='2'

%env tchunk='2'

These control chunking. 'False' means no modification of the original netCDF file's chunks.

ychunk=10 groups the original netCDF files ten by ten along y.

tchunk=1 chunks the time coordinate one step at a time.
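
As a rough illustration (a sketch, not the notebook's own code): the source files are 13 rows tall in y (see the DOMAIN_size_local attribute further down), so ychunk=10 amounts to something like

data = data.chunk({'y': 13 * 10, 't': 1})  # y chunks of 130 rows, as in the output below; tchunk=1 would give one time step per chunk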

%env file_exp=

'file_exp': the name of the 'experiment'. It corresponds to the intake catalog name, without the path and the .yaml extension.

%env year=

For validation, this corresponds to the year in path/year/month.

For monitoring, this corresponds to the 'date'; a value containing * means all files in the monitoring directory are processed.

Setting it to 0[0-9], 1[0-9] and *[2-3][0-9] splits the job into three lots.
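
A quick check that those three patterns partition a month of daily files (hypothetical two-digit date strings, for illustration only):

import fnmatch
dates = [f'{d:02d}' for d in range(1, 32)]
for pat in ('0[0-9]', '1[0-9]', '*[2-3][0-9]'):
    print(pat, fnmatch.filter(dates, pat))
# each date matches exactly one pattern, so the job splits cleanly into three lots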

%env month=

For monitoring, this corresponds to the file path path-XIOS.{month}/.

%env control=FWC_SSH

Name of the control file used for computation/plots/saving, and how it is called from Monitor.sh.

Monitor.sh calls M_MLD_2D, and the dedicated scripts AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh and Integrals.sh call:

  • AWTD.sh: M_AWTMD
  • Fluxnet.sh: M_Fluxnet
  • Siconc.sh: M_Ice_quantities
  • IceClim.sh: M_IceClim M_IceConce M_IceThick
  • FWC_SSH.sh: M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
  • Integrals.sh: M_Mean_temp_velo M_Mooring M_Sectionx M_Sectiony
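
The same mapping as a Python dict, for reference (names copied verbatim from the list above):

controls_by_script = {
    'Monitor.sh':   ['M_MLD_2D'],
    'AWTD.sh':      ['M_AWTMD'],
    'Fluxnet.sh':   ['M_Fluxnet'],
    'Siconc.sh':    ['M_Ice_quantities'],
    'IceClim.sh':   ['M_IceClim', 'M_IceConce', 'M_IceThick'],
    'FWC_SSH.sh':   ['M_FWC_2D', 'M_FWC_integrals', 'M_FWC_SSH', 'M_SSH_anomaly'],
    'Integrals.sh': ['M_Mean_temp_velo', 'M_Mooring', 'M_Sectionx', 'M_Sectiony'],
}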

%env save= : proceed with saving? True or False. Default is True.

%env plot= : proceed with plotting? True or False. Default is True.

%env calc= : proceed with the computation, or just load a computed result? True or False. Default is True.

%env save=False

%env lazy=False

For debugging, this cell can help:

%env file_exp=SEDNA_DELTA_MONITOR
%env year=2012
%env month=01        # or 0[1-2]
%env ychunk=10       # or False
%env save=False
%env plot=True
%env calc=True
# %env lazy=False
%env control=M_Fluxnet

M_Sectiony is known to work with ychunk=False, local=True, lazy=False.
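
On the reading side these switches are plain environment variables with string defaults, e.g. (a sketch mirroring the os.environ.get calls in cell In [5]; the ychunk/tchunk defaults are assumptions):

import os
local  = os.environ.get('local',  'True')    # 'True' or a number of workers
ychunk = os.environ.get('ychunk', 'False')   # 'False' keeps the original chunking (assumed default)
tchunk = os.environ.get('tchunk', 'False')   # assumed default
calc   = os.environ.get('calc',   'True')
save   = os.environ.get('save',   'True')
plot   = os.environ.get('plot',   'True')
lazy   = os.environ.get('lazy',   'False')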

In [3]:
%%time
# 'savefig': do we save the output as html? Keep it True.
savefig = True
client, cluster, control, catalog_url, month, year, daskreport, outputpath = load.set_control(host)
!mkdir -p $outputpath
!mkdir -p $daskreport
client
local True
using host= irene6169.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16
10000000000
False
tgcc local cluster starting
This code is running on  irene6169.c-irene.mg1.tgcc.ccc.cea.fr using  SEDNA_DELTA_MONITOR file experiment, read from  ../lib/SEDNA_DELTA_MONITOR.yaml  on year= 2012  on month= 01  outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6419103irene6169.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_01M_IceThick/
CPU times: user 3.53 s, sys: 688 ms, total: 4.21 s
Wall time: 1min 30s
Out[3]:

Client

Client-ea15006d-13d8-11ed-81cc-080038b93ed7

Connection method: Cluster object
Cluster type: distributed.LocalCluster
Dashboard: http://127.0.0.1:8787/status

Cluster Info

LocalCluster

9d799de2

Dashboard: http://127.0.0.1:8787/status
Workers: 64
Total threads: 256
Total memory: 251.06 GiB
Status: running
Using processes: True

Scheduler Info

Scheduler

Scheduler-1feb4789-e8a6-4eea-adab-e076f92d17bd

Comm: tcp://127.0.0.1:44574
Workers: 64
Dashboard: http://127.0.0.1:8787/status
Total threads: 256
Started: 1 minute ago
Total memory: 251.06 GiB

Workers

Worker: 0

Comm: tcp://127.0.0.1:38924
Total threads: 4
Dashboard: http://127.0.0.1:46772/status
Memory: 3.92 GiB
Nanny: tcp://127.0.0.1:35495
Local directory: /tmp/dask-worker-space/worker-xcmb3vp2

(Workers 1–63 repeat the same layout: 4 threads and 3.92 GiB each, differing only in ports and local directories.)

Read plotting information from a csv file

In [4]:
df = load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
df
Out[4]:
Value         Inputs         Equation                                            Zone  Plot  Colourmap  MinMax  Unit  Oldname  Unnamed: 10
IceThickness  icemod.sivolu  (data.sivolu.where(data.sivolu >0)).to_dataset...  ALL   maps  Spectral   (0,5)   m     M-4

Computation starts here

Each computation consists of (a schematic sketch follows the list):

  1. Load NEMO data set
  2. Zoom data set
  3. Compute (or load computed data set)
  4. Save
  5. Plot
  6. Close
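
A schematic of one pass through these steps, pieced together from calls visible later in this notebook (the zoom and plot steps are only sketched as comments; the real sequence lives in core/monitor.py):

data = load.datas(catalog_url, df.Inputs, month, year, daskreport, lazy=lazy)   # 1. load NEMO data
# 2. zoom: subset `data` to the row's Zone (helper in core/zoom.py)
data = data.sivolu.where(data.sivolu > 0).to_dataset(name='sivolu').persist()   # 3. compute (the IceThickness row)
save.datas(data, plot=Plot, path=nc_outputpath, filename=filename)              # 4. save
# 5. plots.* renders the 'maps' output; 6. close: release the persisted dataset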
In [5]:
%%time
import os
calcswitch = os.environ.get('calc', 'True')
lazy = os.environ.get('lazy', 'False')
loaddata = (df.Inputs != '').any()
print('calcswitch=', calcswitch, 'df.Inputs != nothing', loaddata, 'lazy=', lazy)
data = load.datas(catalog_url, df.Inputs, month, year, daskreport, lazy=lazy) if (calcswitch == 'True' and loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= True
../lib/SEDNA_DELTA_MONITOR.yaml
using param_xios reading  ../lib/SEDNA_DELTA_MONITOR.yaml
using param_xios reading  <bound method DataSourceBase.describe of sources:
  param_xios:
    args:
      combine: nested
      concat_dim: y
      urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc
      xarray_kwargs:
        compat: override
        coords: minimal
        data_vars: minimal
        parallel: true
    description: SEDNA NEMO parameters from MPI output  nav_lon lat fails
    driver: intake_xarray.netcdf.NetCDFSource
    metadata:
      catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/
>
{'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output  nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}}
0 read icemod ['sivolu']
lazy= True
using load_data_xios reading  icemod
using load_data_xios reading  {'name': 'data_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO outputs from different xios server', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'name of config', 'type': 'str', 'default': '/ccc/scratch/cont003/gen7420/talandel/SEDNA/SEDNA-DELTA-S/SPLIT/1d'}, {'name': 'fileexp', 'description': 'name of config', 'type': 'str', 'default': 'SEDNA-DELTA'}, {'name': 'month', 'description': 'running number 2 digit', 'type': 'str', 'default': '02'}, {'name': 'freq', 'description': '1d or 1m', 'type': 'str', 'default': '1d'}, {'name': 'year', 'description': 'last digits of yearmonthdate.', 'type': 'str', 'default': '2012'}, {'name': 'file', 'description': 'file name', 'type': 'str', 'default': 'icemod'}, {'name': 'eio', 'description': 'xios mpi number', 'type': 'str', 'default': '0[0-5][0-9][0-9]'}], 'metadata': {}, 'args': {'urlpath': '{{path}}/{{year}}/{{month}}/*{{file}}_*_{{eio}}.nc', 'combine': 'nested', 'concat_dim': 'time_counter,x,y'}}
      took 246.5845160484314 seconds
0 merging icemod ['sivolu']
param nav_lat will be included in data
param nav_lon will be included in data
param mask2d will be included in data
ychunk= 10 calldatas_y_rechunk
sum_num (13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12)
start rechunking with (130, 122, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 48)
end of y_rechunk
CPU times: user 1min 54s, sys: 25.1 s, total: 2min 19s
Wall time: 4min 59s
Out[5]:
<xarray.Dataset>
Dimensions:        (t: 31, y: 6540, x: 6560)
Coordinates:
  * t              (t) object 2012-01-01 12:00:00 ... 2012-01-31 12:00:00
  * y              (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540
  * x              (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560
    nav_lat        (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray>
    nav_lon        (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray>
    time_centered  (t) object dask.array<chunksize=(31,), meta=np.ndarray>
    mask2d         (y, x) bool dask.array<chunksize=(130, 6560), meta=np.ndarray>
Data variables:
    sivolu         (t, y, x) float32 dask.array<chunksize=(31, 130, 6560), meta=np.ndarray>
Attributes: (12/26)
    name:                    /ccc/scratch/cont003/ra5563/talandel/ONGOING-RUN...
    description:             ice variables
    title:                   ice variables
    Conventions:             CF-1.6
    timeStamp:               2022-Jan-17 19:00:05 GMT
    uuid:                    65f78891-6a37-4a91-8ad4-7c8b5dc0d456
    ...                      ...
    start_date:              20090101
    output_frequency:        1d
    CONFIG:                  SEDNA
    CASE:                    DELTA
    history:                 Tue Jan 18 17:20:08 2022: ncks -4 -L 1 SEDNA-DEL...
    NCO:                     netCDF Operators version 4.9.1 (Homepage = http:...
In [6]:
%%time
monitor.auto(df, data, savefig, daskreport, outputpath, file_exp='SEDNA')
#calc= True
#save= True
#plot= False
Value='IceThickness'
Zone='ALL'
Plot='maps'
cmap='Spectral'
clabel='m'
clim= (0, 5)
outputpath='../results/SEDNA_DELTA_MONITOR/'
nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_maps_ALL_IceThickness'
data=monitor.optimize_dataset(data)
#3 Start computing 
data= (data.sivolu.where(data.sivolu >0)).to_dataset(name='sivolu').chunk({ 't': -1 }).unify_chunks().persist()
monitor.optimize_dataset(data)
add optimise here once otimise can recognise
<xarray.Dataset>
Dimensions:        (t: 31, y: 6540, x: 6560)
Coordinates:
  * t              (t) object 2012-01-01 12:00:00 ... 2012-01-31 12:00:00
  * y              (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540
  * x              (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560
    nav_lat        (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray>
    nav_lon        (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray>
    time_centered  (t) object dask.array<chunksize=(31,), meta=np.ndarray>
    mask2d         (y, x) bool dask.array<chunksize=(130, 6560), meta=np.ndarray>
Data variables:
    sivolu         (t, y, x) float32 dask.array<chunksize=(31, 130, 6560), meta=np.ndarray>
#4 Saving  SEDNA_maps_ALL_IceThickness
data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)
start saving data
saving data in a file
t (31,)
0
slice(0, 31, None)
2022-08-04 11:41:31,001 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.74 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:41:31,108 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:32815 (pid=98914) exceeded 99% memory budget. Restarting...
2022-08-04 11:41:31,317 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:32815
Traceback (most recent call last):
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 867, in _read_to_buffer
    bytes_read = self.read_from_fd(buf)
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1140, in read_from_fd
    return self.socket.recv_into(buf, len(buf))
ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect
    handshake = await asyncio.wait_for(comm.read(), time_left())
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 445, in wait_for
    return fut.result()
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read
    convert_stream_closed_error(self, e)
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error
    raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc
distributed.comm.core.CommClosedError: in <TCP (closed)  local=tcp://127.0.0.1:57412 remote=tcp://127.0.0.1:32815>: ConnectionResetError: [Errno 104] Connection reset by peer

The above exception was the direct cause of the following exception:

Traceback (most recent call last):
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep
    response = await get_data_from_worker(
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker
    return await retry_operation(_get_data, operation="get_data_from_worker")
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation
    return await retry(
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry
    return await coro()
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data
    comm = await rpc.connect(worker)
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect
    return await connect_attempt
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect
    comm = await connect(
  File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect
    raise OSError(
OSError: Timed out during handshake while connecting to tcp://127.0.0.1:32815 after 30 s
2022-08-04 11:41:31,339 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:41:32,228 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.69 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:41:32,333 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:42872 (pid=98919) exceeded 99% memory budget. Restarting...
2022-08-04 11:41:32,543 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:41:33,423 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.72 GiB -- Worker memory limit: 3.92 GiB
2022-08-04 11:41:33,545 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:40733 (pid=99025) exceeded 99% memory budget. Restarting...
2022-08-04 11:41:33,752 - distributed.nanny - WARNING - Restarting worker
2022-08-04 11:41:34,745 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:42886 (pid=99036) exceeded 99% memory budget. Restarting...
2022-08-04 11:41:34,996 - distributed.nanny - WARNING - Restarting worker
---------------------------------------------------------------------------
KilledWorker                              Traceback (most recent call last)
File <timed eval>:1, in <module>

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:89, in auto(df, val, savefig, daskreport, outputpath, file_exp)
     87         print('data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)' )
     88         with performance_report(filename=daskreport+"_save_"+step.Value+".html"):
---> 89             save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)                
     90 # 5. Plot       
     91     if plotswitch=='True': 

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:24, in datas(data, plot, path, filename)
     22     twoD(data,path,filename,nested=False)
     23 else :
---> 24     twoD(data,path,filename)
     25 return None

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:58, in twoD(data, path, filename, nested)
     56 print('saving data in a file')
     57 filesave=path+filename  
---> 58 return to_mfnetcdf_map(data,prefix=filesave, nested=nested)

File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:229, in to_mfnetcdf_map(ds, prefix, nested)
    223     template=ds.isel(t=i)
    224     mapped=xr.map_blocks(
    225         create_eachfile, template
    226         ,kwargs=dict(prefix=prefix,nested=nested)
    227         ,template=template
    228         )  
--> 229     mapped.compute()
    231 return mapped

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:899, in Dataset.compute(self, **kwargs)
    880 """Manually trigger loading and/or computation of this dataset's data
    881 from disk or a remote source into memory and return a new dataset.
    882 Unlike load, the original dataset is left unaltered.
   (...)
    896 dask.compute
    897 """
    898 new = self.copy(deep=False)
--> 899 return new.load(**kwargs)

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:733, in Dataset.load(self, **kwargs)
    730 import dask.array as da
    732 # evaluate all the dask arrays simultaneously
--> 733 evaluated_data = da.compute(*lazy_data.values(), **kwargs)
    735 for k, data in zip(lazy_data, evaluated_data):
    736     self.variables[k].data = data

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/base.py:598, in compute(traverse, optimize_graph, scheduler, get, *args, **kwargs)
    595     keys.append(x.__dask_keys__())
    596     postcomputes.append(x.__dask_postcompute__())
--> 598 results = schedule(dsk, keys, **kwargs)
    599 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)])

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:3001, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs)
   2999         should_rejoin = False
   3000 try:
-> 3001     results = self.gather(packed, asynchronous=asynchronous, direct=direct)
   3002 finally:
   3003     for f in futures.values():

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2175, in Client.gather(self, futures, errors, direct, asynchronous)
   2173 else:
   2174     local_worker = None
-> 2175 return self.sync(
   2176     self._gather,
   2177     futures,
   2178     errors=errors,
   2179     direct=direct,
   2180     local_worker=local_worker,
   2181     asynchronous=asynchronous,
   2182 )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:338, in SyncMethodMixin.sync(self, func, asynchronous, callback_timeout, *args, **kwargs)
    336     return future
    337 else:
--> 338     return sync(
    339         self.loop, func, *args, callback_timeout=callback_timeout, **kwargs
    340     )

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:405, in sync(loop, func, callback_timeout, *args, **kwargs)
    403 if error:
    404     typ, exc, tb = error
--> 405     raise exc.with_traceback(tb)
    406 else:
    407     return result

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:378, in sync.<locals>.f()
    376         future = asyncio.wait_for(future, callback_timeout)
    377     future = asyncio.ensure_future(future)
--> 378     result = yield future
    379 except Exception:
    380     error = sys.exc_info()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/gen.py:762, in Runner.run(self)
    759 exc_info = None
    761 try:
--> 762     value = future.result()
    763 except Exception:
    764     exc_info = sys.exc_info()

File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2038, in Client._gather(self, futures, errors, direct, local_worker)
   2036         exc = CancelledError(key)
   2037     else:
-> 2038         raise exception.with_traceback(traceback)
   2039     raise exc
   2040 if errors == "skip":

KilledWorker: ("('open_dataset-getitem-getitem-c78ae9ab328e9d5703486e56f681beb0', 0, 0, 0)", <WorkerState 'tcp://127.0.0.1:42886', name: 60, status: closed, memory: 0, processing: 1>)