%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()
from core import load, zoom, calc, save,plots,monitor
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
Below is a list of environment variables one can pass:
local : if 'True', run a local dask cluster; otherwise use the number of workers set in 'local'. If no 'local' is given, it defaults to 'True'.
#
Monitor.sh calls M_MLD_2D
and AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh
AWTD.sh M_AWTMD
Fluxnet.sh M_Fluxnet
FWC_SSH.sh M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
Integrals.sh M_Mean_temp_velo M_Mooring M_Sectionx M_Sectiony
%%time
# 'savefig': Do we save output in html? or not. keep it true.
savefig=True
# Resolve the run configuration from the host name: dask client/cluster,
# control-file handle, data catalog URL, target month/year, and the
# dask-report / results output directories.
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
# Make sure both output directories exist (IPython shell escapes).
!mkdir -p $outputpath
!mkdir -p $daskreport
# Display the dask client summary (dashboard link, workers, memory).
client
local True using host= irene4874.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16 10000000000 False tgcc local cluster starting This code is running on irene4874.c-irene.mg1.tgcc.ccc.cea.fr using SEDNA_DELTA_MONITOR file experiment, read from ../lib/SEDNA_DELTA_MONITOR.yaml on year= 2012 on month= 01 outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6419116irene4874.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_01M_Ice_quantities/ CPU times: user 3.96 s, sys: 711 ms, total: 4.67 s Wall time: 1min 42s
Client-f2f80921-13d8-11ed-82a3-080038b93b1f
Connection method: Cluster object | Cluster type: distributed.LocalCluster |
Dashboard: http://127.0.0.1:8787/status |
e92ea649
Dashboard: http://127.0.0.1:8787/status | Workers: 64 |
Total threads: 256 | Total memory: 251.06 GiB |
Status: running | Using processes: True |
Scheduler-771f0638-ff62-42d3-9936-c5642b971cfd
Comm: tcp://127.0.0.1:36099 | Workers: 64 |
Dashboard: http://127.0.0.1:8787/status | Total threads: 256 |
Started: 1 minute ago | Total memory: 251.06 GiB |
Comm: tcp://127.0.0.1:33877 | Total threads: 4 |
Dashboard: http://127.0.0.1:40127/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33183 | |
Local directory: /tmp/dask-worker-space/worker-q2j3yc98 |
Comm: tcp://127.0.0.1:41974 | Total threads: 4 |
Dashboard: http://127.0.0.1:45794/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40847 | |
Local directory: /tmp/dask-worker-space/worker-llxvhqof |
Comm: tcp://127.0.0.1:34665 | Total threads: 4 |
Dashboard: http://127.0.0.1:40900/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34010 | |
Local directory: /tmp/dask-worker-space/worker-z810zl57 |
Comm: tcp://127.0.0.1:39274 | Total threads: 4 |
Dashboard: http://127.0.0.1:33715/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37047 | |
Local directory: /tmp/dask-worker-space/worker-gmwatt7e |
Comm: tcp://127.0.0.1:33177 | Total threads: 4 |
Dashboard: http://127.0.0.1:38295/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39255 | |
Local directory: /tmp/dask-worker-space/worker-0zgpuhw3 |
Comm: tcp://127.0.0.1:41742 | Total threads: 4 |
Dashboard: http://127.0.0.1:40562/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34751 | |
Local directory: /tmp/dask-worker-space/worker-fu5m4y54 |
Comm: tcp://127.0.0.1:34539 | Total threads: 4 |
Dashboard: http://127.0.0.1:42287/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:36236 | |
Local directory: /tmp/dask-worker-space/worker-m6tvre39 |
Comm: tcp://127.0.0.1:36667 | Total threads: 4 |
Dashboard: http://127.0.0.1:35214/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38431 | |
Local directory: /tmp/dask-worker-space/worker-tvthktr3 |
Comm: tcp://127.0.0.1:40399 | Total threads: 4 |
Dashboard: http://127.0.0.1:34237/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45310 | |
Local directory: /tmp/dask-worker-space/worker-3geau138 |
Comm: tcp://127.0.0.1:35975 | Total threads: 4 |
Dashboard: http://127.0.0.1:40984/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40644 | |
Local directory: /tmp/dask-worker-space/worker-sq6xbg20 |
Comm: tcp://127.0.0.1:42444 | Total threads: 4 |
Dashboard: http://127.0.0.1:39571/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40936 | |
Local directory: /tmp/dask-worker-space/worker-owyr218d |
Comm: tcp://127.0.0.1:38498 | Total threads: 4 |
Dashboard: http://127.0.0.1:36937/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43272 | |
Local directory: /tmp/dask-worker-space/worker-_7v4zprc |
Comm: tcp://127.0.0.1:34300 | Total threads: 4 |
Dashboard: http://127.0.0.1:46189/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33513 | |
Local directory: /tmp/dask-worker-space/worker-svvm3t_b |
Comm: tcp://127.0.0.1:33002 | Total threads: 4 |
Dashboard: http://127.0.0.1:40769/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43239 | |
Local directory: /tmp/dask-worker-space/worker-7ouzifkf |
Comm: tcp://127.0.0.1:46775 | Total threads: 4 |
Dashboard: http://127.0.0.1:38161/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44946 | |
Local directory: /tmp/dask-worker-space/worker-9o41dhqu |
Comm: tcp://127.0.0.1:40345 | Total threads: 4 |
Dashboard: http://127.0.0.1:41065/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33938 | |
Local directory: /tmp/dask-worker-space/worker-qy1uakq6 |
Comm: tcp://127.0.0.1:38086 | Total threads: 4 |
Dashboard: http://127.0.0.1:38157/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33743 | |
Local directory: /tmp/dask-worker-space/worker-3qe5ge1h |
Comm: tcp://127.0.0.1:41805 | Total threads: 4 |
Dashboard: http://127.0.0.1:43060/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38228 | |
Local directory: /tmp/dask-worker-space/worker-6re1vt72 |
Comm: tcp://127.0.0.1:36510 | Total threads: 4 |
Dashboard: http://127.0.0.1:33249/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35734 | |
Local directory: /tmp/dask-worker-space/worker-pp6uvhds |
Comm: tcp://127.0.0.1:36481 | Total threads: 4 |
Dashboard: http://127.0.0.1:37246/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44950 | |
Local directory: /tmp/dask-worker-space/worker-wlm5fipu |
Comm: tcp://127.0.0.1:43997 | Total threads: 4 |
Dashboard: http://127.0.0.1:44143/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42034 | |
Local directory: /tmp/dask-worker-space/worker-g7kynnvg |
Comm: tcp://127.0.0.1:45746 | Total threads: 4 |
Dashboard: http://127.0.0.1:33303/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42864 | |
Local directory: /tmp/dask-worker-space/worker-9fld2a6i |
Comm: tcp://127.0.0.1:46345 | Total threads: 4 |
Dashboard: http://127.0.0.1:34835/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41994 | |
Local directory: /tmp/dask-worker-space/worker-132kvh8t |
Comm: tcp://127.0.0.1:41422 | Total threads: 4 |
Dashboard: http://127.0.0.1:33889/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41255 | |
Local directory: /tmp/dask-worker-space/worker-2i1k9os3 |
Comm: tcp://127.0.0.1:45291 | Total threads: 4 |
Dashboard: http://127.0.0.1:36350/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42715 | |
Local directory: /tmp/dask-worker-space/worker-djhfcry8 |
Comm: tcp://127.0.0.1:34458 | Total threads: 4 |
Dashboard: http://127.0.0.1:42335/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:36565 | |
Local directory: /tmp/dask-worker-space/worker-l4f7yjaj |
Comm: tcp://127.0.0.1:35398 | Total threads: 4 |
Dashboard: http://127.0.0.1:36942/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45397 | |
Local directory: /tmp/dask-worker-space/worker-q9g6zn6s |
Comm: tcp://127.0.0.1:39023 | Total threads: 4 |
Dashboard: http://127.0.0.1:44901/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43130 | |
Local directory: /tmp/dask-worker-space/worker-gw_b3lwm |
Comm: tcp://127.0.0.1:36152 | Total threads: 4 |
Dashboard: http://127.0.0.1:33070/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37235 | |
Local directory: /tmp/dask-worker-space/worker-5twck3fc |
Comm: tcp://127.0.0.1:43104 | Total threads: 4 |
Dashboard: http://127.0.0.1:36981/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:46622 | |
Local directory: /tmp/dask-worker-space/worker-hk5zm_et |
Comm: tcp://127.0.0.1:46322 | Total threads: 4 |
Dashboard: http://127.0.0.1:39265/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45314 | |
Local directory: /tmp/dask-worker-space/worker-xt18__vt |
Comm: tcp://127.0.0.1:46093 | Total threads: 4 |
Dashboard: http://127.0.0.1:40310/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40969 | |
Local directory: /tmp/dask-worker-space/worker-ie62nqb1 |
Comm: tcp://127.0.0.1:45019 | Total threads: 4 |
Dashboard: http://127.0.0.1:33643/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33146 | |
Local directory: /tmp/dask-worker-space/worker-amsct7qs |
Comm: tcp://127.0.0.1:43553 | Total threads: 4 |
Dashboard: http://127.0.0.1:41378/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39752 | |
Local directory: /tmp/dask-worker-space/worker-v469xa2q |
Comm: tcp://127.0.0.1:41792 | Total threads: 4 |
Dashboard: http://127.0.0.1:36606/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42559 | |
Local directory: /tmp/dask-worker-space/worker-53llv043 |
Comm: tcp://127.0.0.1:35663 | Total threads: 4 |
Dashboard: http://127.0.0.1:41082/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44212 | |
Local directory: /tmp/dask-worker-space/worker-elhtbrt0 |
Comm: tcp://127.0.0.1:34352 | Total threads: 4 |
Dashboard: http://127.0.0.1:34783/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43113 | |
Local directory: /tmp/dask-worker-space/worker-6o4c09cz |
Comm: tcp://127.0.0.1:42761 | Total threads: 4 |
Dashboard: http://127.0.0.1:36571/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:36092 | |
Local directory: /tmp/dask-worker-space/worker-rpkj5del |
Comm: tcp://127.0.0.1:37178 | Total threads: 4 |
Dashboard: http://127.0.0.1:33904/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:43413 | |
Local directory: /tmp/dask-worker-space/worker-b409dqej |
Comm: tcp://127.0.0.1:35340 | Total threads: 4 |
Dashboard: http://127.0.0.1:33136/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38126 | |
Local directory: /tmp/dask-worker-space/worker-dbyikdtk |
Comm: tcp://127.0.0.1:46520 | Total threads: 4 |
Dashboard: http://127.0.0.1:34294/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:42901 | |
Local directory: /tmp/dask-worker-space/worker-va54_wbz |
Comm: tcp://127.0.0.1:37743 | Total threads: 4 |
Dashboard: http://127.0.0.1:36452/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39310 | |
Local directory: /tmp/dask-worker-space/worker-m3uhp6r5 |
Comm: tcp://127.0.0.1:38093 | Total threads: 4 |
Dashboard: http://127.0.0.1:44935/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34163 | |
Local directory: /tmp/dask-worker-space/worker-7ubqsiu_ |
Comm: tcp://127.0.0.1:37336 | Total threads: 4 |
Dashboard: http://127.0.0.1:36039/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:33742 | |
Local directory: /tmp/dask-worker-space/worker-qa7zwjsu |
Comm: tcp://127.0.0.1:44821 | Total threads: 4 |
Dashboard: http://127.0.0.1:34519/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41970 | |
Local directory: /tmp/dask-worker-space/worker-7mk_o8o4 |
Comm: tcp://127.0.0.1:34568 | Total threads: 4 |
Dashboard: http://127.0.0.1:35928/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38552 | |
Local directory: /tmp/dask-worker-space/worker-7y7a5wcq |
Comm: tcp://127.0.0.1:36881 | Total threads: 4 |
Dashboard: http://127.0.0.1:41389/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41377 | |
Local directory: /tmp/dask-worker-space/worker-rbmy4h3z |
Comm: tcp://127.0.0.1:37681 | Total threads: 4 |
Dashboard: http://127.0.0.1:45471/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44245 | |
Local directory: /tmp/dask-worker-space/worker-eqm0j4pu |
Comm: tcp://127.0.0.1:35917 | Total threads: 4 |
Dashboard: http://127.0.0.1:39056/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:44526 | |
Local directory: /tmp/dask-worker-space/worker-z5844i65 |
Comm: tcp://127.0.0.1:43395 | Total threads: 4 |
Dashboard: http://127.0.0.1:44322/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38493 | |
Local directory: /tmp/dask-worker-space/worker-lkvl3tbw |
Comm: tcp://127.0.0.1:35981 | Total threads: 4 |
Dashboard: http://127.0.0.1:43148/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38743 | |
Local directory: /tmp/dask-worker-space/worker-szrv2itg |
Comm: tcp://127.0.0.1:41223 | Total threads: 4 |
Dashboard: http://127.0.0.1:38273/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37257 | |
Local directory: /tmp/dask-worker-space/worker-p64fgcxp |
Comm: tcp://127.0.0.1:41090 | Total threads: 4 |
Dashboard: http://127.0.0.1:35193/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38151 | |
Local directory: /tmp/dask-worker-space/worker-jf7rv1dp |
Comm: tcp://127.0.0.1:40695 | Total threads: 4 |
Dashboard: http://127.0.0.1:46618/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38056 | |
Local directory: /tmp/dask-worker-space/worker-njaqp2pl |
Comm: tcp://127.0.0.1:39982 | Total threads: 4 |
Dashboard: http://127.0.0.1:38422/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:45568 | |
Local directory: /tmp/dask-worker-space/worker-q3gyuqu8 |
Comm: tcp://127.0.0.1:39473 | Total threads: 4 |
Dashboard: http://127.0.0.1:45907/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:46110 | |
Local directory: /tmp/dask-worker-space/worker-ord42_hr |
Comm: tcp://127.0.0.1:39893 | Total threads: 4 |
Dashboard: http://127.0.0.1:39139/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:34766 | |
Local directory: /tmp/dask-worker-space/worker-k0bn5znz |
Comm: tcp://127.0.0.1:40686 | Total threads: 4 |
Dashboard: http://127.0.0.1:43854/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39343 | |
Local directory: /tmp/dask-worker-space/worker-ofr29seg |
Comm: tcp://127.0.0.1:45087 | Total threads: 4 |
Dashboard: http://127.0.0.1:44027/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:37191 | |
Local directory: /tmp/dask-worker-space/worker-v_l2nmk3 |
Comm: tcp://127.0.0.1:43370 | Total threads: 4 |
Dashboard: http://127.0.0.1:40245/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:39667 | |
Local directory: /tmp/dask-worker-space/worker-8tcn1965 |
Comm: tcp://127.0.0.1:35962 | Total threads: 4 |
Dashboard: http://127.0.0.1:36018/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:40284 | |
Local directory: /tmp/dask-worker-space/worker-2s_f0b_7 |
Comm: tcp://127.0.0.1:40486 | Total threads: 4 |
Dashboard: http://127.0.0.1:33236/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:35181 | |
Local directory: /tmp/dask-worker-space/worker-5f59q05j |
Comm: tcp://127.0.0.1:39650 | Total threads: 4 |
Dashboard: http://127.0.0.1:46229/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:38214 | |
Local directory: /tmp/dask-worker-space/worker-dum00lfj |
Comm: tcp://127.0.0.1:34606 | Total threads: 4 |
Dashboard: http://127.0.0.1:45793/status | Memory: 3.92 GiB |
Nanny: tcp://127.0.0.1:41454 | |
Local directory: /tmp/dask-worker-space/worker-7zkuthal |
# Load the control table for this run: one row per computation, with columns
# Value, Inputs, Equation, Zone, Plot, Colourmap, MinMax, Unit, Oldname.
df=load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
# Display the control table.
df
Value | Inputs | Equation | Zone | Plot | Colourmap | MinMax | Unit | Oldname | Unnamed: 10 | |
---|---|---|---|---|---|---|---|---|---|---|
Ice_quantities | param.e1te2t,icemod.sivelo,icemod.sivolu,icemo... | calc.Ice_quant(data) | ALL | Ice_intquant | None | (0,20) | cm s^(-1) | I-2 |
Each computation consists of
%%time
import os
calcswitch=os.environ.get('calc', 'True')
lazy=os.environ.get('lazy','False' )
loaddata=((df.Inputs != '').any())
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata, 'lazy=',lazy)
data = load.datas(catalog_url,df.Inputs,month,year,daskreport,lazy=lazy) if ((calcswitch=='True' )*loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= True ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading <bound method DataSourceBase.describe of sources: param_xios: args: combine: nested concat_dim: y urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc xarray_kwargs: compat: override coords: minimal data_vars: minimal parallel: true description: SEDNA NEMO parameters from MPI output nav_lon lat fails driver: intake_xarray.netcdf.NetCDFSource metadata: catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/ > {'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}} 0 read icemod ['sivolu', 'siconc', 'sivelo'] lazy= True using load_data_xios reading icemod using load_data_xios reading {'name': 'data_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO outputs from different xios server', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'name of config', 'type': 'str', 'default': '/ccc/scratch/cont003/gen7420/talandel/SEDNA/SEDNA-DELTA-S/SPLIT/1d'}, {'name': 'fileexp', 'description': 'name of config', 'type': 'str', 'default': 'SEDNA-DELTA'}, {'name': 'month', 'description': 'running number 2 digit', 'type': 'str', 'default': '02'}, {'name': 'freq', 'description': '1d or 1m', 'type': 'str', 'default': '1d'}, {'name': 
'year', 'description': 'last digits of yearmonthdate.', 'type': 'str', 'default': '2012'}, {'name': 'file', 'description': 'file name', 'type': 'str', 'default': 'icemod'}, {'name': 'eio', 'description': 'xios mpi number', 'type': 'str', 'default': '0[0-5][0-9][0-9]'}], 'metadata': {}, 'args': {'urlpath': '{{path}}/{{year}}/{{month}}/*{{file}}_*_{{eio}}.nc', 'combine': 'nested', 'concat_dim': 'time_counter,x,y'}} took 267.37703800201416 seconds 0 merging icemod ['sivolu', 'siconc', 'sivelo'] param mask2d will be included in data param nav_lat will be included in data param nav_lon will be included in data param e1te2t will be included in data CPU times: user 1min 58s, sys: 27.9 s, total: 2min 26s Wall time: 5min 26s
<xarray.Dataset> Dimensions: (t: 31, y: 6540, x: 6560) Coordinates: * t (t) object 2012-01-01 12:00:00 ... 2012-01-31 12:00:00 * y (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560 nav_lat (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray> nav_lon (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray> time_centered (t) object dask.array<chunksize=(31,), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(13, 6560), meta=np.ndarray> e1te2t (y, x) float64 dask.array<chunksize=(13, 6560), meta=np.ndarray> Data variables: sivolu (t, y, x) float32 dask.array<chunksize=(31, 6540, 6560), meta=np.ndarray> siconc (t, y, x) float32 dask.array<chunksize=(31, 6540, 6560), meta=np.ndarray> sivelo (t, y, x) float32 dask.array<chunksize=(31, 6540, 6560), meta=np.ndarray> Attributes: (12/26) name: /ccc/scratch/cont003/ra5563/talandel/ONGOING-RUN... description: ice variables title: ice variables Conventions: CF-1.6 timeStamp: 2022-Jan-17 19:00:05 GMT uuid: 65f78891-6a37-4a91-8ad4-7c8b5dc0d456 ... ... start_date: 20090101 output_frequency: 1d CONFIG: SEDNA CASE: DELTA history: Tue Jan 18 17:20:08 2022: ncks -4 -L 1 SEDNA-DEL... NCO: netCDF Operators version 4.9.1 (Homepage = http:...
%%time
monitor.auto(df,data,savefig,daskreport,outputpath,file_exp='SEDNA'
)
#calc= True #save= True #plot= False Value='Ice_quantities' Zone='ALL' Plot='Ice_intquant' cmap='None' clabel='cm s^(-1)' clim= (0, 20) outputpath='../results/SEDNA_DELTA_MONITOR/' nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/' filename='SEDNA_Ice_intquant_ALL_Ice_quantities' data=monitor.optimize_dataset(data) #3 Start computing data= calc.Ice_quant(data) monitor.optimize_dataset(data) add optimise here once otimise can recognise
<xarray.Dataset> Dimensions: (t: 31) Coordinates: * t (t) object 2012-01-01 12:00:00 ... 2012-01-31 12:00:00 time_centered (t) object dask.array<chunksize=(31,), meta=np.ndarray> Data variables: Ice volume (t) float64 dask.array<chunksize=(31,), meta=np.ndarray> Ice area (t) float64 dask.array<chunksize=(31,), meta=np.ndarray> Ice extent (t) float64 dask.array<chunksize=(31,), meta=np.ndarray> Ice drift (t) float64 dask.array<chunksize=(31,), meta=np.ndarray>
#4 Saving SEDNA_Ice_intquant_ALL_Ice_quantities data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename) start saving data saving data in a csv file ../nc_results/SEDNA_DELTA_MONITOR/SEDNA_Ice_intquant_ALL_Ice_quantities2012-01-01_2012-01-31.nc
2022-08-04 11:43:11,203 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:33002 (pid=164740) exceeded 99% memory budget. Restarting... 2022-08-04 11:43:11,423 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.69 GiB -- Worker memory limit: 3.92 GiB 2022-08-04 11:43:11,465 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:43:11,555 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:40686 (pid=164781) exceeded 99% memory budget. Restarting... 2022-08-04 11:43:11,778 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:43:12,156 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.70 GiB -- Worker memory limit: 3.92 GiB 2022-08-04 11:43:12,258 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39274 (pid=164791) exceeded 99% memory budget. Restarting... 2022-08-04 11:43:12,541 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:43:12,791 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.74 GiB -- Worker memory limit: 3.92 GiB 2022-08-04 11:43:12,836 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39023 (pid=164611) exceeded 99% memory budget. Restarting... 
2022-08-04 11:43:13,059 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:43:13,353 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:34665 (pid=164749) exceeded 99% memory budget. Restarting... 2022-08-04 11:43:13,595 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:43:14,002 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:35340 (pid=164730) exceeded 99% memory budget. Restarting... 2022-08-04 11:43:14,208 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:43:14,284 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:35398 (pid=164616) exceeded 99% memory budget. Restarting... 2022-08-04 11:43:14,662 - distributed.nanny - WARNING - Restarting worker 2022-08-04 11:43:15,183 - distributed.worker_memory - WARNING - Unmanaged memory use is high. This may indicate a memory leak or the memory may not be released to the OS; see https://distributed.dask.org/en/latest/worker-memory.html#memory-not-released-back-to-the-os for more information. -- Unmanaged memory: 3.68 GiB -- Worker memory limit: 3.92 GiB 2022-08-04 11:43:15,315 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:35917 (pid=164625) exceeded 99% memory budget. Restarting... 2022-08-04 11:43:16,041 - distributed.nanny - WARNING - Restarting worker
--------------------------------------------------------------------------- KilledWorker Traceback (most recent call last) File <timed eval>:1, in <module> File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py:89, in auto(df, val, savefig, daskreport, outputpath, file_exp) 87 print('data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)' ) 88 with performance_report(filename=daskreport+"_save_"+step.Value+".html"): ---> 89 save.datas(data,plot=Plot,path=nc_outputpath,filename=filename) 90 # 5. Plot 91 if plotswitch=='True': File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:16, in datas(data, plot, path, filename) 14 print('start saving data' ) 15 if 'int' in plot: ---> 16 savedfile=integral(data,path,filename) 17 print('save computed data at',savedfile,'completed') 18 elif 'Mooring' in plot: File /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/save.py:87, in integral(data, path, filename) 85 print('saving data in a csv file',filesave) 86 #data[filename]=data ---> 87 data.to_netcdf(filesave,mode='w') 88 return filesave File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/core/dataset.py:1882, in Dataset.to_netcdf(self, path, mode, format, group, engine, encoding, unlimited_dims, compute, invalid_netcdf) 1879 encoding = {} 1880 from ..backends.api import to_netcdf -> 1882 return to_netcdf( # type: ignore # mypy cannot resolve the overloads:( 1883 self, 1884 path, 1885 mode=mode, 1886 format=format, 1887 group=group, 1888 engine=engine, 1889 encoding=encoding, 1890 unlimited_dims=unlimited_dims, 1891 compute=compute, 1892 multifile=False, 1893 invalid_netcdf=invalid_netcdf, 1894 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/api.py:1219, in to_netcdf(dataset, path_or_file, mode, format, group, engine, encoding, unlimited_dims, compute, multifile, invalid_netcdf) 1216 if multifile: 1217 return writer, store -> 1219 
writes = writer.sync(compute=compute) 1221 if isinstance(target, BytesIO): 1222 store.sync() File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/xarray/backends/common.py:168, in ArrayWriter.sync(self, compute) 162 import dask.array as da 164 # TODO: consider wrapping targets with dask.delayed, if this makes 165 # for any discernible difference in perforance, e.g., 166 # targets = [dask.delayed(t) for t in self.targets] --> 168 delayed_store = da.store( 169 self.sources, 170 self.targets, 171 lock=self.lock, 172 compute=compute, 173 flush=True, 174 regions=self.regions, 175 ) 176 self.sources = [] 177 self.targets = [] File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/core.py:1229, in store(***failed resolving arguments***) 1227 elif compute: 1228 store_dsk = HighLevelGraph(layers, dependencies) -> 1229 compute_as_if_collection(Array, store_dsk, map_keys, **kwargs) 1230 return None 1232 else: File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/base.py:342, in compute_as_if_collection(cls, dsk, keys, scheduler, get, **kwargs) 340 schedule = get_scheduler(scheduler=scheduler, cls=cls, get=get) 341 dsk2 = optimization_function(cls)(dsk, keys, **kwargs) --> 342 return schedule(dsk2, keys, **kwargs) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:3001, in Client.get(self, dsk, keys, workers, allow_other_workers, resources, sync, asynchronous, direct, retries, priority, fifo_timeout, actors, **kwargs) 2999 should_rejoin = False 3000 try: -> 3001 results = self.gather(packed, asynchronous=asynchronous, direct=direct) 3002 finally: 3003 for f in futures.values(): File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2175, in Client.gather(self, futures, errors, direct, asynchronous) 2173 else: 2174 local_worker = None -> 2175 return self.sync( 2176 self._gather, 2177 futures, 2178 errors=errors, 2179 
direct=direct, 2180 local_worker=local_worker, 2181 asynchronous=asynchronous, 2182 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:338, in SyncMethodMixin.sync(self, func, asynchronous, callback_timeout, *args, **kwargs) 336 return future 337 else: --> 338 return sync( 339 self.loop, func, *args, callback_timeout=callback_timeout, **kwargs 340 ) File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:405, in sync(loop, func, callback_timeout, *args, **kwargs) 403 if error: 404 typ, exc, tb = error --> 405 raise exc.with_traceback(tb) 406 else: 407 return result File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils.py:378, in sync.<locals>.f() 376 future = asyncio.wait_for(future, callback_timeout) 377 future = asyncio.ensure_future(future) --> 378 result = yield future 379 except Exception: 380 error = sys.exc_info() File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/gen.py:762, in Runner.run(self) 759 exc_info = None 761 try: --> 762 value = future.result() 763 except Exception: 764 exc_info = sys.exc_info() File /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/client.py:2038, in Client._gather(self, futures, errors, direct, local_worker) 2036 exc = CancelledError(key) 2037 else: -> 2038 raise exception.with_traceback(traceback) 2039 raise exc 2040 if errors == "skip": KilledWorker: ("('open_dataset-getitem-getitem-getitem-f611aea20f9738d6721b37c6c7a61da4', 0, 0, 0)", <WorkerState 'tcp://127.0.0.1:35398', name: 26, status: closed, memory: 0, processing: 2>)