%matplotlib inline
import pandas as pd
import socket
# Fully-qualified hostname of the node running this notebook; passed to
# load.set_control() below to pick the matching cluster configuration.
host = socket.getfqdn()
from core import load, zoom, calc, save,plots,monitor
# Reload project modules so edits to ./core/*.py take effect without
# restarting the kernel.
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
# 'month' = 'JOBID': almost a month, but not exactly.
# The above applies if you submit the job with a job scheduler.
# Below is a list of environment variables one can pass.
#%env local='2'
# local : if True, run a dask local cluster; if not True, it gives the
# number of workers set in 'local'.
# If no 'local' is given, local defaults to 'True'.
#%env ychunk='2'
#%env tchunk='2'
# controls chunk. 'False' sets no modification from original netcdf file's chunk.
# ychunk=10 will group the original netcdf file to 10 by 10
# tchunk=1 will chunk the time coordinate one by one
#%env control=FWC_SSH
# name of control file to be used for computation/plots/save/
#%env file_exp=
# 'file_exp': Which 'experiment' name is it?
#  this corresponds to the intake catalog name, without path and .yaml
#%env year=
# for Validation, this corresponds to the year in path/year/month
# for monitoring, this corresponds to 'date'; a * means do all files in the monitoring directory
# setting it as *0[0-9] & *1[0-9] & *[2-3][0-9], the job can be separated into three lots.
#%env month=
# for monitoring this corresponds to file path path-XIOS.{month}/
#
#%env save= proceed saving? True or False. Default is set to True
#%env plot= proceed plotting? True or False. Default is set to True
#%env calc= proceed computation? or just load computed result? True or False. Default is set to True
#%env save=False
#%env lazy=False
%%time
# 'savefig': Do we save output in html? or not. keep it true.
savefig=True
# Start the dask cluster/client and resolve this run's configuration
# (control file name, intake catalog URL, month/year, dask report path and
# results path) from the environment variables documented above.
client,cluster,control,catalog_url,month,year,daskreport,outputpath = load.set_control(host)
# Shell escapes: make sure the output directories exist ($vars expand from
# the Python variables just set).
!mkdir -p $outputpath
!mkdir -p $daskreport
# Display the dask client/cluster summary widget.
client
local True using host= irene4142.c-irene.mg1.tgcc.ccc.cea.fr starting dask cluster on local= True workers 16 10000000000 False rome local cluster starting This code is running on irene4142.c-irene.mg1.tgcc.ccc.cea.fr using SEDNA_DELTA_MONITOR file experiment, read from ../lib/SEDNA_DELTA_MONITOR.yaml on year= 2012 on month= 01 outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6414595irene4142.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_01M_AWTMD/ CPU times: user 492 ms, sys: 147 ms, total: 639 ms Wall time: 20.7 s
Client-2adeee59-1355-11ed-b465-080038b9322d
Connection method: Cluster object | Cluster type: distributed.LocalCluster |
Dashboard: http://127.0.0.1:8787/status |
44db7a56
Dashboard: http://127.0.0.1:8787/status | Workers: 16 |
Total threads: 128 | Total memory: 251.06 GiB |
Status: running | Using processes: True |
Scheduler-df773bd5-8591-4914-9d89-b164fc07ae85
Comm: tcp://127.0.0.1:36670 | Workers: 16 |
Dashboard: http://127.0.0.1:8787/status | Total threads: 128 |
Started: Just now | Total memory: 251.06 GiB |
Comm: tcp://127.0.0.1:42378 | Total threads: 8 |
Dashboard: http://127.0.0.1:42890/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:33510 | |
Local directory: /tmp/dask-worker-space/worker-cw5i7haf |
Comm: tcp://127.0.0.1:35394 | Total threads: 8 |
Dashboard: http://127.0.0.1:43870/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:42669 | |
Local directory: /tmp/dask-worker-space/worker-18rh5t61 |
Comm: tcp://127.0.0.1:44612 | Total threads: 8 |
Dashboard: http://127.0.0.1:44746/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:43457 | |
Local directory: /tmp/dask-worker-space/worker-knwaj6kr |
Comm: tcp://127.0.0.1:34084 | Total threads: 8 |
Dashboard: http://127.0.0.1:43834/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:43927 | |
Local directory: /tmp/dask-worker-space/worker-xw54qdg4 |
Comm: tcp://127.0.0.1:42243 | Total threads: 8 |
Dashboard: http://127.0.0.1:36600/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:41596 | |
Local directory: /tmp/dask-worker-space/worker-1tj5ajbu |
Comm: tcp://127.0.0.1:43465 | Total threads: 8 |
Dashboard: http://127.0.0.1:40126/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:44349 | |
Local directory: /tmp/dask-worker-space/worker-05kv_pk_ |
Comm: tcp://127.0.0.1:45184 | Total threads: 8 |
Dashboard: http://127.0.0.1:42887/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:43238 | |
Local directory: /tmp/dask-worker-space/worker-45bvw8x7 |
Comm: tcp://127.0.0.1:41835 | Total threads: 8 |
Dashboard: http://127.0.0.1:32810/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:34675 | |
Local directory: /tmp/dask-worker-space/worker-dk7qpqmo |
Comm: tcp://127.0.0.1:36604 | Total threads: 8 |
Dashboard: http://127.0.0.1:46236/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:44837 | |
Local directory: /tmp/dask-worker-space/worker-uhe3liqa |
Comm: tcp://127.0.0.1:39916 | Total threads: 8 |
Dashboard: http://127.0.0.1:38175/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:37531 | |
Local directory: /tmp/dask-worker-space/worker-g5xp_np9 |
Comm: tcp://127.0.0.1:40630 | Total threads: 8 |
Dashboard: http://127.0.0.1:41579/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:38110 | |
Local directory: /tmp/dask-worker-space/worker-4_gpup1w |
Comm: tcp://127.0.0.1:44435 | Total threads: 8 |
Dashboard: http://127.0.0.1:36249/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:37996 | |
Local directory: /tmp/dask-worker-space/worker-vn5cb502 |
Comm: tcp://127.0.0.1:39567 | Total threads: 8 |
Dashboard: http://127.0.0.1:34677/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:44659 | |
Local directory: /tmp/dask-worker-space/worker-13ujwlx2 |
Comm: tcp://127.0.0.1:36367 | Total threads: 8 |
Dashboard: http://127.0.0.1:45564/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:37265 | |
Local directory: /tmp/dask-worker-space/worker-ppx2zr8t |
Comm: tcp://127.0.0.1:42177 | Total threads: 8 |
Dashboard: http://127.0.0.1:34488/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:39366 | |
Local directory: /tmp/dask-worker-space/worker-o56tb9x9 |
Comm: tcp://127.0.0.1:35254 | Total threads: 8 |
Dashboard: http://127.0.0.1:34556/status | Memory: 15.69 GiB |
Nanny: tcp://127.0.0.1:41212 | |
Local directory: /tmp/dask-worker-space/worker-_n8o1f2d |
# Load the control table: one row per computation, with its input variables,
# equation, zone, plot type, colourmap, value limits and unit.
df=load.controlfile(control)
# Take out 'later'-tagged computations (disabled for now).
#df=df[~df['Value'].str.contains('later')]
# Display the control table.
df
Value | Inputs | Equation | Zone | Plot | Colourmap | MinMax | Unit | Oldname | Unnamed: 10 | |
---|---|---|---|---|---|---|---|---|---|---|
AW_maxtemp_depth | gridT.votemper,gridS.vosaline,param.mask,param... | calc.AWTD4(data) | ALL | AWTD_map | jet | (0,800) | m | M-5 |
Each computation consists of
%%time
import os
calcswitch=os.environ.get('calc', 'True')
lazy=os.environ.get('lazy','False' )
loaddata=((df.Inputs != '').any())
print('calcswitch=',calcswitch,'df.Inputs != nothing',loaddata, 'lazy=',lazy)
data = load.datas(catalog_url,df.Inputs,month,year,daskreport,lazy=lazy) if ((calcswitch=='True' )*loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= False ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading <bound method DataSourceBase.describe of sources: param_xios: args: combine: nested concat_dim: y urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc xarray_kwargs: compat: override coords: minimal data_vars: minimal parallel: true description: SEDNA NEMO parameters from MPI output nav_lon lat fails driver: intake_xarray.netcdf.NetCDFSource metadata: catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/ > {'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}} 0 read gridS ['vosaline'] lazy= False using load_data_xios_kerchunk reading gridS using load_data_xios_kerchunk reading <bound method DataSourceBase.describe of sources: data_xios_kerchunk: args: consolidated: false storage_options: fo: file:////ccc/cont003/home/ra5563/ra5563/catalogue/DELTA/201201/gridS_0[0-5][0-9][0-9].json target_protocol: file urlpath: reference:// description: CREG025 NEMO outputs from different xios server in kerchunk format driver: intake_xarray.xzarr.ZarrSource metadata: catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/ > took 24.80897879600525 seconds 0 merging gridS ['vosaline'] 1 read gridT ['votemper'] lazy= False using load_data_xios_kerchunk reading gridT using 
load_data_xios_kerchunk reading <bound method DataSourceBase.describe of sources: data_xios_kerchunk: args: consolidated: false storage_options: fo: file:////ccc/cont003/home/ra5563/ra5563/catalogue/DELTA/201201/gridT_0[0-5][0-9][0-9].json target_protocol: file urlpath: reference:// description: CREG025 NEMO outputs from different xios server in kerchunk format driver: intake_xarray.xzarr.ZarrSource metadata: catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/ > took 25.652851581573486 seconds 1 merging gridT ['votemper'] took 0.7913141250610352 seconds param nav_lon will be included in data param depth will be included in data param nav_lat will be included in data param mask2d will be included in data param mask will be included in data CPU times: user 36.7 s, sys: 5.42 s, total: 42.1 s Wall time: 1min 14s
<xarray.Dataset> Dimensions: (t: 31, z: 150, y: 6540, x: 6560) Coordinates: time_centered (t) object dask.array<chunksize=(1,), meta=np.ndarray> * t (t) object 2012-01-01 12:00:00 ... 2012-01-31 12:00:00 * y (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560 * z (z) int64 1 2 3 4 5 6 7 8 ... 143 144 145 146 147 148 149 150 nav_lon (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray> depth (z, y, x) float32 dask.array<chunksize=(150, 13, 6560), meta=np.ndarray> nav_lat (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(13, 6560), meta=np.ndarray> mask (z, y, x) bool dask.array<chunksize=(150, 13, 6560), meta=np.ndarray> Data variables: vosaline (t, z, y, x) float32 dask.array<chunksize=(1, 150, 13, 6560), meta=np.ndarray> votemper (t, z, y, x) float32 dask.array<chunksize=(1, 150, 13, 6560), meta=np.ndarray> Attributes: (12/26) CASE: DELTA CONFIG: SEDNA Conventions: CF-1.6 DOMAIN_dimensions_ids: [2, 3] DOMAIN_halo_size_end: [0, 0] DOMAIN_halo_size_start: [0, 0] ... ... nj: 13 output_frequency: 1d start_date: 20090101 timeStamp: 2022-Jan-17 19:00:16 GMT title: ocean T grid variables uuid: d8db76f6-a436-451a-9ab1-72dc892753af
%%time
# Run every computation listed in the control table df: compute (calc.*),
# save netcdf results and plot figures, honouring the calc/save/plot
# environment switches; dask performance reports go under daskreport.
monitor.auto(df,data,savefig,daskreport,outputpath,file_exp='SEDNA'
)
#calc= True #save= True #plot= False Value='AW_maxtemp_depth' Zone='ALL' Plot='AWTD_map' cmap='jet' clabel='m' clim= (0, 800) outputpath='../results/SEDNA_DELTA_MONITOR/' nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/' filename='SEDNA_AWTD_map_ALL_AW_maxtemp_depth' monitor.optimize_dataset(data) #3 Start computing dtaa= calc.AWTD4(data) monitor.optimize_dataset(dtaa)
<xarray.Dataset> Dimensions: (t: 31, y: 6540, x: 6560) Coordinates: time_centered (t) object dask.array<chunksize=(1,), meta=np.ndarray> * t (t) object 2012-01-01 12:00:00 ... 2012-01-31 12:00:00 * y (y) int64 1 2 3 4 5 6 7 ... 6535 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 ... 6555 6556 6557 6558 6559 6560 nav_lon (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray> nav_lat (y, x) float32 dask.array<chunksize=(13, 6560), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(13, 6560), meta=np.ndarray> Data variables: AWT (t, y, x) float32 dask.array<chunksize=(1, 13, 6560), meta=np.ndarray> AWD (t, y, x) float32 dask.array<chunksize=(1, 13, 6560), meta=np.ndarray>
#4 Saving SEDNA_AWTD_map_ALL_AW_maxtemp_depth dtaa=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename) start saving data saving data in a file t (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1) 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 slice(0, 1, None)
/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) 2022-08-03 19:55:24,368 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44435 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 223, in read frames_nbytes = await stream.read_bytes(fmt_size) asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: 
Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 456, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect handshake = await asyncio.wait_for(comm.read(), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 458, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise 
OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44435 after 30 s 2022-08-03 19:55:24,416 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44435 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 223, in read frames_nbytes = await stream.read_bytes(fmt_size) asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 456, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect handshake = await asyncio.wait_for(comm.read(), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 458, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44435 after 30 s 2022-08-03 19:55:38,036 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44435 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 264, in write async def write(self, msg, serializers=None, on_error="message"): asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 418, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 329, in connect await asyncio.wait_for(comm.write(local_info), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 420, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response 
= await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44435 after 30 s 2022-08-03 19:55:38,271 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44435 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 264, in write async def write(self, msg, serializers=None, on_error="message"): asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 418, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 329, in connect await asyncio.wait_for(comm.write(local_info), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 420, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44435 after 30 s /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) 
/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) 2022-08-03 19:55:38,693 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44435 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 264, in write async def write(self, msg, serializers=None, on_error="message"): asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 418, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 329, in connect await asyncio.wait_for(comm.write(local_info), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 420, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44435 after 30 s /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) 2022-08-03 19:55:38,771 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44435 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 264, in write async def write(self, msg, serializers=None, on_error="message"): asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 418, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 329, in connect await asyncio.wait_for(comm.write(local_info), time_left()) File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 420, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44435 after 30 s /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, 
keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims)
slice(1, 2, None)
/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims) /ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims)
slice(2, 3, None)
/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims)
slice(3, 4, None)
/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims)
slice(4, 5, None)
/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/dask/array/reductions.py:608: RuntimeWarning: All-NaN slice encountered return np.nanmax(x_chunk, axis=axis, keepdims=keepdims)
slice(5, 6, None) slice(6, 7, None) slice(7, 8, None) slice(8, 9, None) slice(9, 10, None) slice(10, 11, None) slice(11, 12, None) slice(12, 13, None) slice(13, 14, None) slice(14, 15, None)
2022-08-03 20:03:39,460 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(15, 16, None)
2022-08-03 20:03:57,983 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:04:00,736 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:04:23,227 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(16, 17, None)
2022-08-03 20:04:31,185 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:04:33,583 - distributed.utils_perf - WARNING - full garbage collections took 11% CPU time recently (threshold: 10%)
slice(17, 18, None)
2022-08-03 20:05:05,247 - distributed.utils_perf - WARNING - full garbage collections took 11% CPU time recently (threshold: 10%) 2022-08-03 20:05:12,368 - distributed.utils_perf - WARNING - full garbage collections took 11% CPU time recently (threshold: 10%) 2022-08-03 20:05:14,588 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(18, 19, None)
2022-08-03 20:05:43,837 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:05:46,737 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:05:48,070 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(19, 20, None)
2022-08-03 20:06:17,214 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:06:20,539 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:06:23,621 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(20, 21, None)
2022-08-03 20:06:49,840 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:06:53,326 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:07:05,263 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(21, 22, None)
2022-08-03 20:07:23,226 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:07:25,945 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:07:48,747 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(22, 23, None)
2022-08-03 20:07:57,060 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(23, 24, None) slice(24, 25, None) slice(25, 26, None) slice(26, 27, None)
2022-08-03 20:10:20,091 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(27, 28, None)
2022-08-03 20:10:44,205 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:10:51,919 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:10:54,741 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(28, 29, None)
2022-08-03 20:11:22,889 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:11:27,568 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(29, 30, None)
2022-08-03 20:11:55,721 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:12:06,656 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
slice(30, 31, None)
2022-08-03 20:12:27,478 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:12:30,349 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%) 2022-08-03 20:13:01,021 - distributed.utils_perf - WARNING - full garbage collections took 10% CPU time recently (threshold: 10%)
CPU times: user 10min 55s, sys: 1min 29s, total: 12min 24s Wall time: 18min 26s