%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()
from core import load, zoom, calc, save,plots,monitor
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
Below is the list of environment variables one can pass:

local : if 'True', run a local dask cluster; if not 'True', the value is taken as the number of workers. If 'local' is not given, it defaults to 'True'.
#
Monitor.sh calls M_MLD_2D
and AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh
AWTD.sh M_AWTMD
Fluxnet.sh M_Fluxnet
FWC_SSH.sh M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
Integrals.sh M_Mean_temp_velo M_Mooring M_Sectionx M_Sectiony
%%time
# 'savefig': Do we save output in html? or not. keep it true.
savefig=True
# Reads the experiment yaml (see output below: ../lib/SEDNA_DELTA_MONITOR.yaml)
# and starts the dask cluster, returning the client/cluster handles, the
# control-file name, the intake catalog url, the month/year to process, and
# the dask-report and results directories.
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
# Ensure the results and dask-report directories exist (IPython shell escapes).
!mkdir -p $outputpath
!mkdir -p $daskreport
# Display the client widget (dashboard link, worker/memory summary).
client
local True using host= irene4596.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16 10000000000 False tgcc local cluster starting This code is running on irene4596.c-irene.mg1.tgcc.ccc.cea.fr using SEDNA_DELTA_MONITOR file experiment, read from ../lib/SEDNA_DELTA_MONITOR.yaml on year= 2012 on month= 02 outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6419108irene4596.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_02M_IceConce/ CPU times: user 3.74 s, sys: 695 ms, total: 4.43 s Wall time: 1min 35s
Client-ed3207ea-13d8-11ed-bf1e-080038b93cdf
Connection method: Cluster object | Cluster type: distributed.LocalCluster |
Dashboard: http://127.0.0.1:8787/status |
ef4afea0
Dashboard: http://127.0.0.1:8787/status | Workers: 64 |
Total threads: 256 | Total memory: 251.06 GiB |
Status: running | Using processes: True |
Scheduler-3e0f7b40-b538-494e-b908-63433439e205
Comm: tcp://127.0.0.1:40282 | Workers: 64 |
Dashboard: http://127.0.0.1:8787/status | Total threads: 256 |
Started: 1 minute ago | Total memory: 251.06 GiB |
Comm: tcp://127.0.0.1:40434 | Total threads: 4 |
Dashboard: http://127.0.0.1:38524/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38909 | |
Local directory: /tmp/dask-worker-space/worker-rz47hn7b |
Comm: tcp://127.0.0.1:43137 | Total threads: 4 |
Dashboard: http://127.0.0.1:35617/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42338 | |
Local directory: /tmp/dask-worker-space/worker-_hwyomjt |
Comm: tcp://127.0.0.1:42263 | Total threads: 4 |
Dashboard: http://127.0.0.1:43654/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33787 | |
Local directory: /tmp/dask-worker-space/worker-mrn3foyl |
Comm: tcp://127.0.0.1:36166 | Total threads: 4 |
Dashboard: http://127.0.0.1:39234/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37656 | |
Local directory: /tmp/dask-worker-space/worker-tglls2yi |
Comm: tcp://127.0.0.1:46087 | Total threads: 4 |
Dashboard: http://127.0.0.1:33309/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35128 | |
Local directory: /tmp/dask-worker-space/worker-90we731e |
Comm: tcp://127.0.0.1:41999 | Total threads: 4 |
Dashboard: http://127.0.0.1:45908/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39675 | |
Local directory: /tmp/dask-worker-space/worker-o_r0ep0c |
Comm: tcp://127.0.0.1:33831 | Total threads: 4 |
Dashboard: http://127.0.0.1:34427/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42778 | |
Local directory: /tmp/dask-worker-space/worker-d334ikdj |
Comm: tcp://127.0.0.1:43702 | Total threads: 4 |
Dashboard: http://127.0.0.1:38624/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37809 | |
Local directory: /tmp/dask-worker-space/worker-9az0o0xq |
Comm: tcp://127.0.0.1:32891 | Total threads: 4 |
Dashboard: http://127.0.0.1:44031/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34307 | |
Local directory: /tmp/dask-worker-space/worker-qiptzmf9 |
Comm: tcp://127.0.0.1:46711 | Total threads: 4 |
Dashboard: http://127.0.0.1:35516/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43195 | |
Local directory: /tmp/dask-worker-space/worker-ujunxkhm |
Comm: tcp://127.0.0.1:39012 | Total threads: 4 |
Dashboard: http://127.0.0.1:46542/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41185 | |
Local directory: /tmp/dask-worker-space/worker-ojwrak55 |
Comm: tcp://127.0.0.1:44599 | Total threads: 4 |
Dashboard: http://127.0.0.1:33728/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44121 | |
Local directory: /tmp/dask-worker-space/worker-8kt1proq |
Comm: tcp://127.0.0.1:41253 | Total threads: 4 |
Dashboard: http://127.0.0.1:36484/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40816 | |
Local directory: /tmp/dask-worker-space/worker-uzg9iamk |
Comm: tcp://127.0.0.1:37723 | Total threads: 4 |
Dashboard: http://127.0.0.1:45256/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38850 | |
Local directory: /tmp/dask-worker-space/worker-096sugab |
Comm: tcp://127.0.0.1:46442 | Total threads: 4 |
Dashboard: http://127.0.0.1:42063/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34421 | |
Local directory: /tmp/dask-worker-space/worker-_c1dghun |
Comm: tcp://127.0.0.1:39826 | Total threads: 4 |
Dashboard: http://127.0.0.1:36228/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35526 | |
Local directory: /tmp/dask-worker-space/worker-0uasj0pb |
Comm: tcp://127.0.0.1:34208 | Total threads: 4 |
Dashboard: http://127.0.0.1:34630/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45088 | |
Local directory: /tmp/dask-worker-space/worker-vn2gwzix |
Comm: tcp://127.0.0.1:41802 | Total threads: 4 |
Dashboard: http://127.0.0.1:45059/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43047 | |
Local directory: /tmp/dask-worker-space/worker-yqqrsd_z |
Comm: tcp://127.0.0.1:45197 | Total threads: 4 |
Dashboard: http://127.0.0.1:32924/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33848 | |
Local directory: /tmp/dask-worker-space/worker-b7rbuc20 |
Comm: tcp://127.0.0.1:33936 | Total threads: 4 |
Dashboard: http://127.0.0.1:38479/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44321 | |
Local directory: /tmp/dask-worker-space/worker-wu3rmsk3 |
Comm: tcp://127.0.0.1:37442 | Total threads: 4 |
Dashboard: http://127.0.0.1:44993/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38433 | |
Local directory: /tmp/dask-worker-space/worker-7mvbhhnp |
Comm: tcp://127.0.0.1:42981 | Total threads: 4 |
Dashboard: http://127.0.0.1:43206/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45657 | |
Local directory: /tmp/dask-worker-space/worker-5_i2sdr9 |
Comm: tcp://127.0.0.1:43724 | Total threads: 4 |
Dashboard: http://127.0.0.1:46667/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34895 | |
Local directory: /tmp/dask-worker-space/worker-5lmai64f |
Comm: tcp://127.0.0.1:42714 | Total threads: 4 |
Dashboard: http://127.0.0.1:34262/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43902 | |
Local directory: /tmp/dask-worker-space/worker-8gliarwj |
Comm: tcp://127.0.0.1:43377 | Total threads: 4 |
Dashboard: http://127.0.0.1:38022/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40131 | |
Local directory: /tmp/dask-worker-space/worker-c35p6bd6 |
Comm: tcp://127.0.0.1:40337 | Total threads: 4 |
Dashboard: http://127.0.0.1:41080/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35994 | |
Local directory: /tmp/dask-worker-space/worker-xvlysr36 |
Comm: tcp://127.0.0.1:43569 | Total threads: 4 |
Dashboard: http://127.0.0.1:43981/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39278 | |
Local directory: /tmp/dask-worker-space/worker-jfua2crs |
Comm: tcp://127.0.0.1:42692 | Total threads: 4 |
Dashboard: http://127.0.0.1:46753/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42178 | |
Local directory: /tmp/dask-worker-space/worker-j5x01qp6 |
Comm: tcp://127.0.0.1:33455 | Total threads: 4 |
Dashboard: http://127.0.0.1:43395/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35630 | |
Local directory: /tmp/dask-worker-space/worker-zxalx6x1 |
Comm: tcp://127.0.0.1:43194 | Total threads: 4 |
Dashboard: http://127.0.0.1:32824/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33248 | |
Local directory: /tmp/dask-worker-space/worker-h87j4_lx |
Comm: tcp://127.0.0.1:34459 | Total threads: 4 |
Dashboard: http://127.0.0.1:38122/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42703 | |
Local directory: /tmp/dask-worker-space/worker-x2zc1k2q |
Comm: tcp://127.0.0.1:40290 | Total threads: 4 |
Dashboard: http://127.0.0.1:39129/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43873 | |
Local directory: /tmp/dask-worker-space/worker-0q6bvwxt |
Comm: tcp://127.0.0.1:46167 | Total threads: 4 |
Dashboard: http://127.0.0.1:40209/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35412 | |
Local directory: /tmp/dask-worker-space/worker-3kuxtw71 |
Comm: tcp://127.0.0.1:41962 | Total threads: 4 |
Dashboard: http://127.0.0.1:37852/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41905 | |
Local directory: /tmp/dask-worker-space/worker-yb7scvfz |
Comm: tcp://127.0.0.1:43883 | Total threads: 4 |
Dashboard: http://127.0.0.1:32777/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42418 | |
Local directory: /tmp/dask-worker-space/worker-ioh5y9ro |
Comm: tcp://127.0.0.1:39592 | Total threads: 4 |
Dashboard: http://127.0.0.1:37706/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43420 | |
Local directory: /tmp/dask-worker-space/worker-5abn1ym2 |
Comm: tcp://127.0.0.1:46299 | Total threads: 4 |
Dashboard: http://127.0.0.1:34978/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40120 | |
Local directory: /tmp/dask-worker-space/worker-w5addm97 |
Comm: tcp://127.0.0.1:34857 | Total threads: 4 |
Dashboard: http://127.0.0.1:36932/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37514 | |
Local directory: /tmp/dask-worker-space/worker-81zv8s79 |
Comm: tcp://127.0.0.1:37385 | Total threads: 4 |
Dashboard: http://127.0.0.1:43196/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44492 | |
Local directory: /tmp/dask-worker-space/worker-ogu0dukr |
Comm: tcp://127.0.0.1:36215 | Total threads: 4 |
Dashboard: http://127.0.0.1:33361/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39178 | |
Local directory: /tmp/dask-worker-space/worker-72jjt7wl |
Comm: tcp://127.0.0.1:36965 | Total threads: 4 |
Dashboard: http://127.0.0.1:36009/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43069 | |
Local directory: /tmp/dask-worker-space/worker-zyk3e8vn |
Comm: tcp://127.0.0.1:41220 | Total threads: 4 |
Dashboard: http://127.0.0.1:33793/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39871 | |
Local directory: /tmp/dask-worker-space/worker-2tqkylbo |
Comm: tcp://127.0.0.1:36447 | Total threads: 4 |
Dashboard: http://127.0.0.1:43710/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33496 | |
Local directory: /tmp/dask-worker-space/worker-pdf7xnnu |
Comm: tcp://127.0.0.1:37339 | Total threads: 4 |
Dashboard: http://127.0.0.1:35201/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35094 | |
Local directory: /tmp/dask-worker-space/worker-fgqt01tu |
Comm: tcp://127.0.0.1:33442 | Total threads: 4 |
Dashboard: http://127.0.0.1:41106/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39815 | |
Local directory: /tmp/dask-worker-space/worker-wga9mi6o |
Comm: tcp://127.0.0.1:45178 | Total threads: 4 |
Dashboard: http://127.0.0.1:33897/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39128 | |
Local directory: /tmp/dask-worker-space/worker-60n3zbsi |
Comm: tcp://127.0.0.1:36753 | Total threads: 4 |
Dashboard: http://127.0.0.1:41897/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:46403 | |
Local directory: /tmp/dask-worker-space/worker-tnv7iith |
Comm: tcp://127.0.0.1:39720 | Total threads: 4 |
Dashboard: http://127.0.0.1:42658/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:32943 | |
Local directory: /tmp/dask-worker-space/worker-1kjsaf3r |
Comm: tcp://127.0.0.1:41773 | Total threads: 4 |
Dashboard: http://127.0.0.1:40893/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45400 | |
Local directory: /tmp/dask-worker-space/worker-v0hpk4tr |
Comm: tcp://127.0.0.1:40043 | Total threads: 4 |
Dashboard: http://127.0.0.1:40657/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39939 | |
Local directory: /tmp/dask-worker-space/worker-7qbq8y2q |
Comm: tcp://127.0.0.1:43505 | Total threads: 4 |
Dashboard: http://127.0.0.1:35881/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34302 | |
Local directory: /tmp/dask-worker-space/worker-qk9ydt_h |
Comm: tcp://127.0.0.1:39652 | Total threads: 4 |
Dashboard: http://127.0.0.1:38467/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34065 | |
Local directory: /tmp/dask-worker-space/worker-oocqsa0g |
Comm: tcp://127.0.0.1:38830 | Total threads: 4 |
Dashboard: http://127.0.0.1:44535/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42522 | |
Local directory: /tmp/dask-worker-space/worker-1zow3wad |
Comm: tcp://127.0.0.1:35914 | Total threads: 4 |
Dashboard: http://127.0.0.1:44906/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:46211 | |
Local directory: /tmp/dask-worker-space/worker-egaj3ae2 |
Comm: tcp://127.0.0.1:37430 | Total threads: 4 |
Dashboard: http://127.0.0.1:43048/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45143 | |
Local directory: /tmp/dask-worker-space/worker-cxj13jij |
Comm: tcp://127.0.0.1:42500 | Total threads: 4 |
Dashboard: http://127.0.0.1:34264/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38183 | |
Local directory: /tmp/dask-worker-space/worker-g3zjh8ll |
Comm: tcp://127.0.0.1:38328 | Total threads: 4 |
Dashboard: http://127.0.0.1:41918/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:46457 | |
Local directory: /tmp/dask-worker-space/worker-xwp_fn_d |
Comm: tcp://127.0.0.1:34719 | Total threads: 4 |
Dashboard: http://127.0.0.1:39119/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42979 | |
Local directory: /tmp/dask-worker-space/worker-q3uonokg |
Comm: tcp://127.0.0.1:36675 | Total threads: 4 |
Dashboard: http://127.0.0.1:33550/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42657 | |
Local directory: /tmp/dask-worker-space/worker-t53qdd3e |
Comm: tcp://127.0.0.1:33207 | Total threads: 4 |
Dashboard: http://127.0.0.1:36114/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40986 | |
Local directory: /tmp/dask-worker-space/worker-oxn9jzow |
Comm: tcp://127.0.0.1:42105 | Total threads: 4 |
Dashboard: http://127.0.0.1:37012/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:36300 | |
Local directory: /tmp/dask-worker-space/worker-eoeb60ay |
Comm: tcp://127.0.0.1:40798 | Total threads: 4 |
Dashboard: http://127.0.0.1:38324/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37204 | |
Local directory: /tmp/dask-worker-space/worker-i89o675t |
Comm: tcp://127.0.0.1:32982 | Total threads: 4 |
Dashboard: http://127.0.0.1:36848/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33997 | |
Local directory: /tmp/dask-worker-space/worker-1dlgu3h0 |
Comm: tcp://127.0.0.1:40583 | Total threads: 4 |
Dashboard: http://127.0.0.1:43303/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42541 | |
Local directory: /tmp/dask-worker-space/worker-7ug5vnoj |
# Load the control table: one row per diagnostic to compute, with its
# input files, equation, zone, plot type, colourmap, etc.
df=load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
# Display the table of requested computations.
df
Value | Inputs | Equation | Zone | Plot | Colourmap | MinMax | Unit | Oldname | Unnamed: 10 | |
---|---|---|---|---|---|---|---|---|---|---|
IceConce | icemod.siconc | (data.siconc.where(data.siconc >0)).to_dataset... | ALL | maps | Blues | None | M-4 |
Each computation consists of the following steps:
%%time
import os
calcswitch=os.environ.get('calc', 'True')
lazy=os.environ.get('lazy','False' )
loaddata=((df.Inputs != '').any())
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata, 'lazy=',lazy)
data = load.datas(catalog_url,df.Inputs,month,year,daskreport,lazy=lazy) if ((calcswitch=='True' )*loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= True ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading <bound method DataSourceBase.describe of sources: param_xios: args: combine: nested concat_dim: y urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc xarray_kwargs: compat: override coords: minimal data_vars: minimal parallel: true description: SEDNA NEMO parameters from MPI output nav_lon lat fails driver: intake_xarray.netcdf.NetCDFSource metadata: catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/ > {'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}} 0 read icemod ['siconc'] lazy= True using load_data_xios reading icemod using load_data_xios reading {'name': 'data_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO outputs from different xios server', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'name of config', 'type': 'str', 'default': '/ccc/scratch/cont003/gen7420/talandel/SEDNA/SEDNA-DELTA-S/SPLIT/1d'}, {'name': 'fileexp', 'description': 'name of config', 'type': 'str', 'default': 'SEDNA-DELTA'}, {'name': 'month', 'description': 'running number 2 digit', 'type': 'str', 'default': '02'}, {'name': 'freq', 'description': '1d or 1m', 'type': 'str', 'default': '1d'}, {'name': 'year', 
'description': 'last digits of yearmonthdate.', 'type': 'str', 'default': '2012'}, {'name': 'file', 'description': 'file name', 'type': 'str', 'default': 'icemod'}, {'name': 'eio', 'description': 'xios mpi number', 'type': 'str', 'default': '0[0-5][0-9][0-9]'}], 'metadata': {}, 'args': {'urlpath': '{{path}}/{{year}}/{{month}}/*{{file}}_*_{{eio}}.nc', 'combine': 'nested', 'concat_dim': 'time_counter,x,y'}} took 250.78706979751587 seconds 0 merging icemod ['siconc'] param nav_lon will be included in data param nav_lat will be included in data param mask2d will be included in data ychunk= 10 calldatas_y_rechunk sum_num (13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12) start rechunking with (130, 122, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 48) end of y_rechunk CPU times: user 1min 56s, sys: 25.8 s, total: 2min 22s Wall time: 5min 6s
<xarray.Dataset> Dimensions: (t: 28, y: 6540, x: 6560) Coordinates: * t (t) object 2012-02-01 12:00:00 ... 2012-02-28 12:00:00 * y (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560 nav_lat (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray> nav_lon (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray> time_centered (t) object dask.array<chunksize=(28,), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(130, 6560), meta=np.ndarray> Data variables: siconc (t, y, x) float32 dask.array<chunksize=(28, 130, 6560), meta=np.ndarray> Attributes: (12/26) name: /ccc/scratch/cont003/ra5563/talandel/ONGOING-RUN... description: ice variables title: ice variables Conventions: CF-1.6 timeStamp: 2022-Jan-18 16:51:17 GMT uuid: 56b165e2-bdda-4b33-a2e9-04a59f3d06e9 ... ... start_date: 20090101 output_frequency: 1d CONFIG: SEDNA CASE: DELTA history: Wed Jan 19 12:40:39 2022: ncks -4 -L 1 SEDNA-DEL... NCO: netCDF Operators version 4.9.1 (Homepage = http:...
%%time
monitor.auto(df,data,savefig,daskreport,outputpath,file_exp='SEDNA'
)
#calc= True #save= True #plot= False Value='IceConce' Zone='ALL' Plot='maps' cmap='Blues' clabel=' ' clim= None outputpath='../results/SEDNA_DELTA_MONITOR/' nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/' filename='SEDNA_maps_ALL_IceConce' data=monitor.optimize_dataset(data) #3 Start computing data= (data.siconc.where(data.siconc >0)).to_dataset(name='siconc').chunk({ 't': -1 }).unify_chunks().persist() monitor.optimize_dataset(data) add optimise here once otimise can recognise
<xarray.Dataset> Dimensions: (t: 28, y: 6540, x: 6560) Coordinates: * t (t) object 2012-02-01 12:00:00 ... 2012-02-28 12:00:00 * y (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560 nav_lat (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray> nav_lon (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray> time_centered (t) object dask.array<chunksize=(28,), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(130, 6560), meta=np.ndarray> Data variables: siconc (t, y, x) float32 dask.array<chunksize=(28, 130, 6560), meta=np.ndarray>
#4 Saving SEDNA_maps_ALL_IceConce data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename) start saving data saving data in a file t (28,) 0 slice(0, 28, None)
2022-08-04 11:41:42,803 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:32891 (pid=16438) exceeded 99% memory budget. Restarting... 2022-08-04 11:41:43,029 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:32891 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 867, in _read_to_buffer bytes_read = self.read_from_fd(buf) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1140, in read_from_fd return self.socket.recv_into(buf, len(buf)) ConnectionResetError: [Errno 104] Connection reset by peer The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect handshake = await asyncio.wait_for(comm.read(), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 445, in wait_for return fut.result() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) local=tcp://127.0.0.1:38851 remote=tcp://127.0.0.1:32891>: ConnectionResetError: [Errno 104] Connection reset by peer The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:32891 after 30 s 2022-08-04 11:41:43,048 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:41:44,050 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:37442 (pid=16263) exceeded 99% memory budget. Restarting... 2022-08-04 11:41:44,256 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:41:45,082 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.57 GiB -- Worker memory limit: 3.92 GiB 2022-08-04 11:41:45,212 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:40043 (pid=16356) exceeded 99% memory budget. Restarting... 
2022-08-04 11:41:45,480 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:41:46,370 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.69 GiB -- Worker memory limit: 3.92 GiB 2022-08-04 11:41:46,469 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:43569 (pid=16436) exceeded 99% memory budget. Restarting... 2022-08-04 11:41:46,738 - distributed.nanny - WARNING - Restarting worker
--------------------------------------------------------------------------- KilledWorker Traceback (most recent call last) File <timed eval>:1, in <module> File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:89, in auto(df, val, savefig, daskreport, outputpath, file_exp) 87 print('data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)' ) 88 with performance_report(filename=daskreport+"_save_"+step.Value+".html"): ---> 89 save.datas(data,plot=Plot,path=nc_outputpath,filename=filename) 90 # 5. Plot 91 if plotswitch=='True': File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:24, in datas(data, plot, path, filename) 22 twoD(data,path,filename,nested=False) 23 else : ---> 24 twoD(data,path,filename) 25 return None File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:58, in twoD(data, path, filename, nested) 56 print('saving data in a file') 57 filesave=path+filename ---> 58 return to_mfnetcdf_map(data,prefix=filesave, nested=nested) File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:229, in to_mfnetcdf_map(ds, prefix, nested) 223 template=ds.isel(t=i) 224 mapped=xr.map_blocks( 225 create_eachfile, template 226 ,kwargs=dict(prefix=prefix,nested=nested) 227 ,template=template 228 ) --> 229 mapped.compute() 231 return mapped File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:899, in Dataset.compute(self, **kwargs) 880 """Manually trigger loading and/or computation of this dataset's data 881 from disk or a remote source into memory and return a new dataset. 882 Unlike load, the original dataset is left unaltered. (...) 
896 dask.compute 897 """ 898 new = self.copy(deep=False) --> 899 return new.load(**kwargs) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:733, in Dataset.load(self, **kwargs) 730 import dask.array as da 732 # evaluate all the dask arrays simultaneously --> 733 evaluated_data = da.compute(*lazy_data.values(), **kwargs) 735 for k, data in zip(lazy_data, evaluated_data): 736 self.variables[k].data = data File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/base.py:598, in compute(traverse, optimize_graph, scheduler, get, *args, **kwargs) 595 keys.append(x.__dask_keys__()) 596 postcomputes.append(x.__dask_postcompute__()) --> 598 results = schedule(dsk, keys, **kwargs) 599 return repack([f(r, *a) for r, (f, a) in zip(results, postcomputes)]) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:3001, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs) 2999 should_rejoin = False 3000 try: -> 3001 results = self.gather(packed, asynchronous=asynchronous, direct=direct) 3002 finally: 3003 for f in futures.values(): File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2175, in Client.gather(self, futures, errors, direct, asynchronous) 2173 else: 2174 local_worker = None -> 2175 return self.sync( 2176 self._gather, 2177 futures, 2178 errors=errors, 2179 direct=direct, 2180 local_worker=local_worker, 2181 asynchronous=asynchronous, 2182 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:338, in SyncMethodMixin.sync(self, func, asynchronous, callback_timeout, *args, **kwargs) 336 return future 337 else: --> 338 return sync( 339 self.loop, func, *args, callback_timeout=callback_timeout, **kwargs 340 ) File 
/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:405, in sync(loop, func, callback_timeout, *args, **kwargs) 403 if error: 404 typ, exc, tb = error --> 405 raise exc.with_traceback(tb) 406 else: 407 return result File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:378, in sync.<locals>.f() 376 future = asyncio.wait_for(future, callback_timeout) 377 future = asyncio.ensure_future(future) --> 378 result = yield future 379 except Exception: 380 error = sys.exc_info() File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/gen.py:762, in Runner.run(self) 759 exc_info = None 761 try: --> 762 value = future.result() 763 except Exception: 764 exc_info = sys.exc_info() File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2038, in Client._gather(self, futures, errors, direct, local_worker) 2036 exc = CancelledError(key) 2037 else: -> 2038 raise exception.with_traceback(traceback) 2039 raise exc 2040 if errors == "skip": KilledWorker: ("('open_dataset-getitem-getitem-81df9a53b6a0ff847a49fca70c0bc8bd', 0, 0, 0)", <WorkerState 'tcp://127.0.0.1:43569', name: 26, status: closed, memory: 0, processing: 1>)