%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()
from core import load, zoom, calc, save,plots,monitor
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
Below is a list of environment variables one can pass:
local : if True, run a dask local cluster; otherwise use the number of workers set in 'local'. If 'local' is not given, it defaults to 'True'.
#
Monitor.sh calls M_MLD_2D
and AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh
AWTD.sh M_AWTMD
Fluxnet.sh M_Fluxnet
FWC_SSH.sh M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
Integrals.sh M_Mean_temp_velo M_Mooring M_Sectionx M_Sectiony
%%time
# 'savefig': Do we save output in html? or not. keep it true.
savefig=True
# Read the run configuration for this host (catalog URL, month/year,
# report and output paths) and start the dask client/cluster — the
# captured output below shows a LocalCluster being launched.
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
# IPython shell escapes: make sure the result directories exist.
!mkdir -p $outputpath
!mkdir -p $daskreport
# Display the dask client summary (last expression in the cell).
client
local True using host= irene4875.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16 10000000000 False tgcc local cluster starting This code is running on irene4875.c-irene.mg1.tgcc.ccc.cea.fr using SEDNA_DELTA_MONITOR file experiment, read from ../lib/SEDNA_DELTA_MONITOR.yaml on year= 2012 on month= 02 outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6419117irene4875.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_02M_Ice_quantities/ CPU times: user 3.73 s, sys: 754 ms, total: 4.49 s Wall time: 1min 37s
Client-eef84e02-13d8-11ed-a0f9-080038b93b95
Connection method: Cluster object | Cluster type: distributed.LocalCluster |
Dashboard: http://127.0.0.1:8787/status |
615d8441
Dashboard: http://127.0.0.1:8787/status | Workers: 64 |
Total threads: 256 | Total memory: 251.06 GiB |
Status: running | Using processes: True |
Scheduler-8a95abe9-46fc-408f-8510-75c7fcb1d6f5
Comm: tcp://127.0.0.1:39619 | Workers: 64 |
Dashboard: http://127.0.0.1:8787/status | Total threads: 256 |
Started: 1 minute ago | Total memory: 251.06 GiB |
Comm: tcp://127.0.0.1:33959 | Total threads: 4 |
Dashboard: http://127.0.0.1:34963/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41896 | |
Local directory: /tmp/dask-worker-space/worker-spentur8 |
Comm: tcp://127.0.0.1:32962 | Total threads: 4 |
Dashboard: http://127.0.0.1:34010/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37252 | |
Local directory: /tmp/dask-worker-space/worker-iyvi0y56 |
Comm: tcp://127.0.0.1:45520 | Total threads: 4 |
Dashboard: http://127.0.0.1:39383/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34051 | |
Local directory: /tmp/dask-worker-space/worker-bwizfekm |
Comm: tcp://127.0.0.1:35356 | Total threads: 4 |
Dashboard: http://127.0.0.1:36805/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38167 | |
Local directory: /tmp/dask-worker-space/worker-k2no8yax |
Comm: tcp://127.0.0.1:38646 | Total threads: 4 |
Dashboard: http://127.0.0.1:46224/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40989 | |
Local directory: /tmp/dask-worker-space/worker-bux2yt20 |
Comm: tcp://127.0.0.1:33870 | Total threads: 4 |
Dashboard: http://127.0.0.1:34088/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38520 | |
Local directory: /tmp/dask-worker-space/worker-fjbo9fm2 |
Comm: tcp://127.0.0.1:43474 | Total threads: 4 |
Dashboard: http://127.0.0.1:43453/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35347 | |
Local directory: /tmp/dask-worker-space/worker-k8w83tjo |
Comm: tcp://127.0.0.1:45092 | Total threads: 4 |
Dashboard: http://127.0.0.1:34874/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37556 | |
Local directory: /tmp/dask-worker-space/worker-ycoh620o |
Comm: tcp://127.0.0.1:34209 | Total threads: 4 |
Dashboard: http://127.0.0.1:38266/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43462 | |
Local directory: /tmp/dask-worker-space/worker-9aco8da4 |
Comm: tcp://127.0.0.1:38028 | Total threads: 4 |
Dashboard: http://127.0.0.1:35331/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:36538 | |
Local directory: /tmp/dask-worker-space/worker-hys9k1n2 |
Comm: tcp://127.0.0.1:36732 | Total threads: 4 |
Dashboard: http://127.0.0.1:35513/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35610 | |
Local directory: /tmp/dask-worker-space/worker-vcpwprnr |
Comm: tcp://127.0.0.1:40142 | Total threads: 4 |
Dashboard: http://127.0.0.1:42865/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35651 | |
Local directory: /tmp/dask-worker-space/worker-fd9th0mz |
Comm: tcp://127.0.0.1:42261 | Total threads: 4 |
Dashboard: http://127.0.0.1:44851/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44674 | |
Local directory: /tmp/dask-worker-space/worker-gqeqgo63 |
Comm: tcp://127.0.0.1:41537 | Total threads: 4 |
Dashboard: http://127.0.0.1:43672/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41342 | |
Local directory: /tmp/dask-worker-space/worker-0on76xo5 |
Comm: tcp://127.0.0.1:38332 | Total threads: 4 |
Dashboard: http://127.0.0.1:44640/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33051 | |
Local directory: /tmp/dask-worker-space/worker-w8r8hid6 |
Comm: tcp://127.0.0.1:33567 | Total threads: 4 |
Dashboard: http://127.0.0.1:44102/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35757 | |
Local directory: /tmp/dask-worker-space/worker-6pvc5s6u |
Comm: tcp://127.0.0.1:34801 | Total threads: 4 |
Dashboard: http://127.0.0.1:45807/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45099 | |
Local directory: /tmp/dask-worker-space/worker-i6z3o5yd |
Comm: tcp://127.0.0.1:43328 | Total threads: 4 |
Dashboard: http://127.0.0.1:38857/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:36295 | |
Local directory: /tmp/dask-worker-space/worker-x7_0s8gb |
Comm: tcp://127.0.0.1:39804 | Total threads: 4 |
Dashboard: http://127.0.0.1:37915/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43460 | |
Local directory: /tmp/dask-worker-space/worker-ni08fyo6 |
Comm: tcp://127.0.0.1:35330 | Total threads: 4 |
Dashboard: http://127.0.0.1:41139/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35345 | |
Local directory: /tmp/dask-worker-space/worker-s57cyqb2 |
Comm: tcp://127.0.0.1:39304 | Total threads: 4 |
Dashboard: http://127.0.0.1:42437/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39907 | |
Local directory: /tmp/dask-worker-space/worker-1h13k3fv |
Comm: tcp://127.0.0.1:45248 | Total threads: 4 |
Dashboard: http://127.0.0.1:40679/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42276 | |
Local directory: /tmp/dask-worker-space/worker-jeyi7yk0 |
Comm: tcp://127.0.0.1:43947 | Total threads: 4 |
Dashboard: http://127.0.0.1:33291/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38499 | |
Local directory: /tmp/dask-worker-space/worker-69sq7dwo |
Comm: tcp://127.0.0.1:41257 | Total threads: 4 |
Dashboard: http://127.0.0.1:36517/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43713 | |
Local directory: /tmp/dask-worker-space/worker-skc8wc0w |
Comm: tcp://127.0.0.1:39240 | Total threads: 4 |
Dashboard: http://127.0.0.1:33357/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:36812 | |
Local directory: /tmp/dask-worker-space/worker-tt91b_5u |
Comm: tcp://127.0.0.1:44489 | Total threads: 4 |
Dashboard: http://127.0.0.1:34274/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35476 | |
Local directory: /tmp/dask-worker-space/worker-os4u8k1v |
Comm: tcp://127.0.0.1:39687 | Total threads: 4 |
Dashboard: http://127.0.0.1:33754/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43803 | |
Local directory: /tmp/dask-worker-space/worker-ymvswu3a |
Comm: tcp://127.0.0.1:39282 | Total threads: 4 |
Dashboard: http://127.0.0.1:37582/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42699 | |
Local directory: /tmp/dask-worker-space/worker-q0wp130e |
Comm: tcp://127.0.0.1:46687 | Total threads: 4 |
Dashboard: http://127.0.0.1:41431/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43518 | |
Local directory: /tmp/dask-worker-space/worker-cwbqkw5w |
Comm: tcp://127.0.0.1:38063 | Total threads: 4 |
Dashboard: http://127.0.0.1:39855/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41175 | |
Local directory: /tmp/dask-worker-space/worker-bckahikt |
Comm: tcp://127.0.0.1:39545 | Total threads: 4 |
Dashboard: http://127.0.0.1:33068/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44252 | |
Local directory: /tmp/dask-worker-space/worker-u1i0jdfj |
Comm: tcp://127.0.0.1:39524 | Total threads: 4 |
Dashboard: http://127.0.0.1:46178/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33213 | |
Local directory: /tmp/dask-worker-space/worker-u61mqzjo |
Comm: tcp://127.0.0.1:41409 | Total threads: 4 |
Dashboard: http://127.0.0.1:45143/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43984 | |
Local directory: /tmp/dask-worker-space/worker-_4lzkhxl |
Comm: tcp://127.0.0.1:36171 | Total threads: 4 |
Dashboard: http://127.0.0.1:39829/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33967 | |
Local directory: /tmp/dask-worker-space/worker-ko33tr37 |
Comm: tcp://127.0.0.1:42180 | Total threads: 4 |
Dashboard: http://127.0.0.1:42970/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:46332 | |
Local directory: /tmp/dask-worker-space/worker-slcbsts8 |
Comm: tcp://127.0.0.1:34301 | Total threads: 4 |
Dashboard: http://127.0.0.1:38248/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41271 | |
Local directory: /tmp/dask-worker-space/worker-y1kmbg0b |
Comm: tcp://127.0.0.1:40139 | Total threads: 4 |
Dashboard: http://127.0.0.1:45292/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34674 | |
Local directory: /tmp/dask-worker-space/worker-6cbs6cid |
Comm: tcp://127.0.0.1:41501 | Total threads: 4 |
Dashboard: http://127.0.0.1:33495/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39340 | |
Local directory: /tmp/dask-worker-space/worker-h0wfvj6l |
Comm: tcp://127.0.0.1:35672 | Total threads: 4 |
Dashboard: http://127.0.0.1:44872/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38390 | |
Local directory: /tmp/dask-worker-space/worker-iaaayrmi |
Comm: tcp://127.0.0.1:42034 | Total threads: 4 |
Dashboard: http://127.0.0.1:41456/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43058 | |
Local directory: /tmp/dask-worker-space/worker-ms2gvv4t |
Comm: tcp://127.0.0.1:35316 | Total threads: 4 |
Dashboard: http://127.0.0.1:39681/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38372 | |
Local directory: /tmp/dask-worker-space/worker-okyp858m |
Comm: tcp://127.0.0.1:37064 | Total threads: 4 |
Dashboard: http://127.0.0.1:36288/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35073 | |
Local directory: /tmp/dask-worker-space/worker-7l7jq34e |
Comm: tcp://127.0.0.1:42166 | Total threads: 4 |
Dashboard: http://127.0.0.1:44200/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35372 | |
Local directory: /tmp/dask-worker-space/worker-n6vsknj4 |
Comm: tcp://127.0.0.1:46219 | Total threads: 4 |
Dashboard: http://127.0.0.1:39588/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43113 | |
Local directory: /tmp/dask-worker-space/worker-ypyjwnzx |
Comm: tcp://127.0.0.1:42961 | Total threads: 4 |
Dashboard: http://127.0.0.1:33623/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35619 | |
Local directory: /tmp/dask-worker-space/worker-kypt3y61 |
Comm: tcp://127.0.0.1:33077 | Total threads: 4 |
Dashboard: http://127.0.0.1:37353/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34503 | |
Local directory: /tmp/dask-worker-space/worker-xowc9h_0 |
Comm: tcp://127.0.0.1:34260 | Total threads: 4 |
Dashboard: http://127.0.0.1:40899/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45911 | |
Local directory: /tmp/dask-worker-space/worker-ia5lk7ux |
Comm: tcp://127.0.0.1:33907 | Total threads: 4 |
Dashboard: http://127.0.0.1:41839/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34234 | |
Local directory: /tmp/dask-worker-space/worker-7konqyg4 |
Comm: tcp://127.0.0.1:34071 | Total threads: 4 |
Dashboard: http://127.0.0.1:43596/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45761 | |
Local directory: /tmp/dask-worker-space/worker-izciiz13 |
Comm: tcp://127.0.0.1:38600 | Total threads: 4 |
Dashboard: http://127.0.0.1:37143/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43067 | |
Local directory: /tmp/dask-worker-space/worker-9kt3z8pn |
Comm: tcp://127.0.0.1:44491 | Total threads: 4 |
Dashboard: http://127.0.0.1:34989/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41987 | |
Local directory: /tmp/dask-worker-space/worker-_45ngcbg |
Comm: tcp://127.0.0.1:45363 | Total threads: 4 |
Dashboard: http://127.0.0.1:37335/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:36022 | |
Local directory: /tmp/dask-worker-space/worker-99ypkq3_ |
Comm: tcp://127.0.0.1:39199 | Total threads: 4 |
Dashboard: http://127.0.0.1:45291/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34446 | |
Local directory: /tmp/dask-worker-space/worker-xhwzzoj8 |
Comm: tcp://127.0.0.1:38138 | Total threads: 4 |
Dashboard: http://127.0.0.1:38544/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33435 | |
Local directory: /tmp/dask-worker-space/worker-ggnr766i |
Comm: tcp://127.0.0.1:39897 | Total threads: 4 |
Dashboard: http://127.0.0.1:37596/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42758 | |
Local directory: /tmp/dask-worker-space/worker-xrd048bp |
Comm: tcp://127.0.0.1:37146 | Total threads: 4 |
Dashboard: http://127.0.0.1:35180/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35971 | |
Local directory: /tmp/dask-worker-space/worker-olw892n1 |
Comm: tcp://127.0.0.1:39194 | Total threads: 4 |
Dashboard: http://127.0.0.1:38844/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40921 | |
Local directory: /tmp/dask-worker-space/worker-x5friyi_ |
Comm: tcp://127.0.0.1:38255 | Total threads: 4 |
Dashboard: http://127.0.0.1:46026/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44119 | |
Local directory: /tmp/dask-worker-space/worker-yp48itg4 |
Comm: tcp://127.0.0.1:38097 | Total threads: 4 |
Dashboard: http://127.0.0.1:44616/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34208 | |
Local directory: /tmp/dask-worker-space/worker-0k6v4674 |
Comm: tcp://127.0.0.1:35978 | Total threads: 4 |
Dashboard: http://127.0.0.1:39305/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44682 | |
Local directory: /tmp/dask-worker-space/worker-u6wg17uw |
Comm: tcp://127.0.0.1:43857 | Total threads: 4 |
Dashboard: http://127.0.0.1:46183/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39405 | |
Local directory: /tmp/dask-worker-space/worker-9zf5g4_e |
Comm: tcp://127.0.0.1:39256 | Total threads: 4 |
Dashboard: http://127.0.0.1:33403/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35693 | |
Local directory: /tmp/dask-worker-space/worker-5q5l25_0 |
Comm: tcp://127.0.0.1:44717 | Total threads: 4 |
Dashboard: http://127.0.0.1:44573/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38162 | |
Local directory: /tmp/dask-worker-space/worker-6jqrsi4f |
Comm: tcp://127.0.0.1:42088 | Total threads: 4 |
Dashboard: http://127.0.0.1:33441/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42303 | |
Local directory: /tmp/dask-worker-space/worker-w0762nkg |
# Load the control table: one row per monitored quantity
# (Value, Inputs, Equation, Zone, Plot, Colourmap, MinMax, Unit, ...).
df=load.controlfile(control)
# Optionally drop computations tagged 'later' (currently disabled):
#df=df[~df['Value'].str.contains('later')]
# Display the table (last expression in the cell).
df
Value | Inputs | Equation | Zone | Plot | Colourmap | MinMax | Unit | Oldname | Unnamed: 10 | |
---|---|---|---|---|---|---|---|---|---|---|
Ice_quantities | param.e1te2t,icemod.sivelo,icemod.sivolu,icemo... | calc.Ice_quant(data) | ALL | Ice_intquant | None | (0,20) | cm s^(-1) | I-2 |
Each computation consists of
%%time
import os
calcswitch=os.environ.get('calc', 'True')
lazy=os.environ.get('lazy','False' )
loaddata=((df.Inputs != '').any())
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata, 'lazy=',lazy)
data = load.datas(catalog_url,df.Inputs,month,year,daskreport,lazy=lazy) if ((calcswitch=='True' )*loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= True ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading <bound method DataSourceBase.describe of sources: param_xios: args: combine: nested concat_dim: y urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc xarray_kwargs: compat: override coords: minimal data_vars: minimal parallel: true description: SEDNA NEMO parameters from MPI output nav_lon lat fails driver: intake_xarray.netcdf.NetCDFSource metadata: catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/ > {'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}} 0 read icemod ['sivolu', 'siconc', 'sivelo'] lazy= True using load_data_xios reading icemod using load_data_xios reading {'name': 'data_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO outputs from different xios server', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'name of config', 'type': 'str', 'default': '/ccc/scratch/cont003/gen7420/talandel/SEDNA/SEDNA-DELTA-S/SPLIT/1d'}, {'name': 'fileexp', 'description': 'name of config', 'type': 'str', 'default': 'SEDNA-DELTA'}, {'name': 'month', 'description': 'running number 2 digit', 'type': 'str', 'default': '02'}, {'name': 'freq', 'description': '1d or 1m', 'type': 'str', 'default': '1d'}, {'name': 
'year', 'description': 'last digits of yearmonthdate.', 'type': 'str', 'default': '2012'}, {'name': 'file', 'description': 'file name', 'type': 'str', 'default': 'icemod'}, {'name': 'eio', 'description': 'xios mpi number', 'type': 'str', 'default': '0[0-5][0-9][0-9]'}], 'metadata': {}, 'args': {'urlpath': '{{path}}/{{year}}/{{month}}/*{{file}}_*_{{eio}}.nc', 'combine': 'nested', 'concat_dim': 'time_counter,x,y'}} took 256.9019663333893 seconds 0 merging icemod ['sivolu', 'siconc', 'sivelo'] param nav_lon will be included in data param mask2d will be included in data param e1te2t will be included in data param nav_lat will be included in data CPU times: user 1min 57s, sys: 26.2 s, total: 2min 23s Wall time: 5min 14s
<xarray.Dataset> Dimensions: (t: 28, y: 6540, x: 6560) Coordinates: * t (t) object 2012-02-01 12:00:00 ... 2012-02-28 12:00:00 * y (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560 nav_lat (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray> nav_lon (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray> time_centered (t) object dask.array<chunksize=(28,), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(13, 6560), meta=np.ndarray> e1te2t (y, x) float64 dask.array<chunksize=(13, 6560), meta=np.ndarray> Data variables: sivolu (t, y, x) float32 dask.array<chunksize=(28, 6540, 6560), meta=np.ndarray> siconc (t, y, x) float32 dask.array<chunksize=(28, 6540, 6560), meta=np.ndarray> sivelo (t, y, x) float32 dask.array<chunksize=(28, 6540, 6560), meta=np.ndarray> Attributes: (12/26) name: /ccc/scratch/cont003/ra5563/talandel/ONGOING-RUN... description: ice variables title: ice variables Conventions: CF-1.6 timeStamp: 2022-Jan-18 16:51:17 GMT uuid: 56b165e2-bdda-4b33-a2e9-04a59f3d06e9 ... ... start_date: 20090101 output_frequency: 1d CONFIG: SEDNA CASE: DELTA history: Wed Jan 19 12:40:39 2022: ncks -4 -L 1 SEDNA-DEL... NCO: netCDF Operators version 4.9.1 (Homepage = http:...
%%time
# Run the monitoring pipeline over the control table: for each row,
# compute the quantity, save the result, and (if enabled) plot it —
# see the step log in the captured output below.
monitor.auto(df,data,savefig,daskreport,outputpath,file_exp='SEDNA'
)
#calc= True #save= True #plot= False Value='Ice_quantities' Zone='ALL' Plot='Ice_intquant' cmap='None' clabel='cm s^(-1)' clim= (0, 20) outputpath='../results/SEDNA_DELTA_MONITOR/' nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/' filename='SEDNA_Ice_intquant_ALL_Ice_quantities' data=monitor.optimize_dataset(data) #3 Start computing data= calc.Ice_quant(data) monitor.optimize_dataset(data) add optimise here once otimise can recognise
<xarray.Dataset> Dimensions: (t: 28) Coordinates: * t (t) object 2012-02-01 12:00:00 ... 2012-02-28 12:00:00 time_centered (t) object dask.array<chunksize=(28,), meta=np.ndarray> Data variables: Ice volume (t) float64 dask.array<chunksize=(28,), meta=np.ndarray> Ice area (t) float64 dask.array<chunksize=(28,), meta=np.ndarray> Ice extent (t) float64 dask.array<chunksize=(28,), meta=np.ndarray> Ice drift (t) float64 dask.array<chunksize=(28,), meta=np.ndarray>
#4 Saving SEDNA_Ice_intquant_ALL_Ice_quantities data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename) start saving data saving data in a csv file ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_Ice_intquant_ALL_Ice_quantities2012-02-01_2012-02-28.nc
2022-08-04 11:42:49,740 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.71 GiB -- Worker memory limit: 3.92 GiB 2022-08-04 11:42:49,833 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:32962 (pid=221611) exceeded 99% memory budget. Restarting... 2022-08-04 11:42:50,059 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:42:50,136 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39897 (pid=221698) exceeded 99% memory budget. Restarting... 2022-08-04 11:42:50,403 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:42:50,667 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39240 (pid=221538) exceeded 99% memory budget. Restarting... 2022-08-04 11:42:50,896 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:42:51,337 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.69 GiB -- Worker memory limit: 3.92 GiB 2022-08-04 11:42:51,438 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39199 (pid=221668) exceeded 99% memory budget. Restarting... 2022-08-04 11:42:51,587 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:34301 (pid=221651) exceeded 99% memory budget. Restarting... 2022-08-04 11:42:51,678 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:42:51,814 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:42:52,371 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:35316 (pid=221554) exceeded 99% memory budget. Restarting... 
2022-08-04 11:42:52,630 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:42:52,650 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:34801 (pid=221709) exceeded 99% memory budget. Restarting... 2022-08-04 11:42:53,543 - distributed.nanny - WARNING - Restarting worker
--------------------------------------------------------------------------- KilledWorker Traceback (most recent call last) File <timed eval>:1, in <module> File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:89, in auto(df, val, savefig, daskreport, outputpath, file_exp) 87 print('data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)' ) 88 with performance_report(filename=daskreport+"_save_"+step.Value+".html"): ---> 89 save.datas(data,plot=Plot,path=nc_outputpath,filename=filename) 90 # 5. Plot 91 if plotswitch=='True': File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:16, in datas(data, plot, path, filename) 14 print('start saving data' ) 15 if 'int' in plot: ---> 16 savedfile=integral(data,path,filename) 17 print('save computed data at',savedfile,'completed') 18 elif 'Mooring' in plot: File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:87, in integral(data, path, filename) 85 print('saving data in a csv file',filesave) 86 #data[filename]=data ---> 87 data.to_netcdf(filesave,mode='w') 88 return filesave File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:1882, in Dataset.to_netcdf(self, path, mode, format, group, engine, encoding, unlimited_dims, compute, invalid_netcdf) 1879 encoding = {} 1880 from ..backends.api import to_netcdf -> 1882 return to_netcdf( # type: ignore # mypy cannot resolve the overloads:( 1883 self, 1884 path, 1885 mode=mode, 1886 format=format, 1887 group=group, 1888 engine=engine, 1889 encoding=encoding, 1890 unlimited_dims=unlimited_dims, 1891 compute=compute, 1892 multifile=False, 1893 invalid_netcdf=invalid_netcdf, 1894 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:1219, in to_netcdf(dataset, path_or_file, mode, format, group, engine, encoding, unlimited_dims, compute, multifile, invalid_netcdf) 1216 if multifile: 1217 return writer, store -> 1219 
writes = writer.sync(compute=compute) 1221 if isinstance(target, BytesIO): 1222 store.sync() File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/common.py:168, in ArrayWriter.sync(self, compute) 162 import dask.array as da 164 # TODO: consider wrapping targets with dask.delayed, if this makes 165 # for any discernible difference in perforance, e.g., 166 # targets = [dask.delayed(t) for t in self.targets] --> 168 delayed_store = da.store( 169 self.sources, 170 self.targets, 171 lock=self.lock, 172 compute=compute, 173 flush=True, 174 regions=self.regions, 175 ) 176 self.sources = [] 177 self.targets = [] File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/core.py:1229, in store(***failed resolving arguments***) 1227 elif compute: 1228 store_dsk = HighLevelGraph(layers, dependencies) -> 1229 compute_as_if_collection(Array, store_dsk, map_keys, **kwargs) 1230 return None 1232 else: File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/base.py:342, in compute_as_if_collection(cls, dsk, keys, scheduler, get, **kwargs) 340 schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get) 341 dsk2 = optimization_function(cls)(dsk, keys, **kwargs) --> 342 return schedule(dsk2, keys, **kwargs) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:3001, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs) 2999 should_rejoin = False 3000 try: -> 3001 results = self.gather(packed, asynchronous=asynchronous, direct=direct) 3002 finally: 3003 for f in futures.values(): File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2175, in Client.gather(self, futures, errors, direct, asynchronous) 2173 else: 2174 local_worker = None -> 2175 return self.sync( 2176 self._gather, 2177 futures, 2178 errors=errors, 2179 
direct=direct, 2180 local_worker=local_worker, 2181 asynchronous=asynchronous, 2182 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:338, in SyncMethodMixin.sync(self, func, asynchronous, callback_timeout, *args, **kwargs) 336 return future 337 else: --> 338 return sync( 339 self.loop, func, *args, callback_timeout=callback_timeout, **kwargs 340 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:405, in sync(loop, func, callback_timeout, *args, **kwargs) 403 if error: 404 typ, exc, tb = error --> 405 raise exc.with_traceback(tb) 406 else: 407 return result File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:378, in sync.<locals>.f() 376 future = asyncio.wait_for(future, callback_timeout) 377 future = asyncio.ensure_future(future) --> 378 result = yield future 379 except Exception: 380 error = sys.exc_info() File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/gen.py:762, in Runner.run(self) 759 exc_info = None 761 try: --> 762 value = future.result() 763 except Exception: 764 exc_info = sys.exc_info() File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2038, in Client._gather(self, futures, errors, direct, local_worker) 2036 exc = CancelledError(key) 2037 else: -> 2038 raise exception.with_traceback(traceback) 2039 raise exc 2040 if errors == "skip": KilledWorker: ("('open_dataset-getitem-getitem-getitem-81df9a53b6a0ff847a49fca70c0bc8bd', 0, 0, 0)", <WorkerState 'tcp://127.0.0.1:35316', name: 40, status: closed, memory: 0, processing: 2>)