%matplotlib inline
import pandas as pd
import socket
host = socket.getfqdn()
from core import load, zoom, calc, save, plots, monitor
#reload funcs after updating ./core/*.py
import importlib
importlib.reload(load)
importlib.reload(zoom)
importlib.reload(calc)
importlib.reload(save)
importlib.reload(plots)
importlib.reload(monitor)
<module 'core.monitor' from '/ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/core/monitor.py'>
If you submit the job with a job scheduler, below is the list of environment variables one can pass (an illustrative example of setting them follows the list).
'local': if 'True', run a local dask cluster; if set to a number, use that many workers. If 'local' is not given, it defaults to 'True'.
%env ychunk='2'  #%env tchunk='2'
'ychunk' and 'tchunk' control the chunking. 'False' keeps the chunking of the original netcdf files unchanged.
ychunk=10 groups the original netcdf files ten by ten along y, as sketched below.
tchunk=1 chunks the time coordinate one step at a time.
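As an illustration only (the real logic lives in core/load.py and is not shown here), grouping ten of the original per-file y slabs and cutting time step by step could look like the following sketch, assuming an xarray Dataset ds and a per-file y size of 13 as in the run below:
import xarray as xr

def rechunk_sketch(ds: xr.Dataset, ychunk: int = 10, ny_per_file: int = 13, tchunk: int = 1) -> xr.Dataset:
    # Group `ychunk` original y slabs into one dask chunk and cut time into `tchunk`-sized chunks.
    return ds.chunk({"y": ychunk * ny_per_file, "t": tchunk})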
%env file_exp=
'file_exp': the 'experiment' name; this corresponds to the intake catalog name without the path and the .yaml extension.
#%env year=
For Validation, this corresponds to the 'year' of path/year/month. For monitoring, this corresponds to the 'date': it means do all files in the monitoring directory, and by setting it as 0[0-9], 1[0-9], or [2-3][0-9] the job can be separated into three lots. For a DELTA experiment, 'year' really corresponds to the year.
%env month=
For monitoring, this corresponds to the file path path-XIOS.{month}/.
For a DELTA experiment, 'month' really corresponds to the month.
'save': proceed with saving? True or False; the default is True.
'plot': proceed with plotting? True or False; the default is True.
'calc': proceed with the computation, or just load the computed result? True or False; the default is True.
The saved output file is the one used for plotting.
'lazy': False means kerchunked files are used; True means kerchunk is not used.
'control': name of the control file to be used for computation/plots/saving. There are a number of M_xxx.csv files:
Monitor.sh calls M_MLD_2D,
and AWTD.sh, Fluxnet.sh, Siconc.sh, IceClim.sh, FWC_SSH.sh, Integrals.sh, and Sections.sh call:
M_AWTMD
M_Fluxnet
M_Ice_quantities
M_IceClim M_IceConce M_IceThick
M_FWC_2D M_FWC_integrals M_FWC_SSH M_SSH_anomaly
M_Mean_temp_velo M_Mooring
M_Sectionx M_Sectiony
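For example, the settings of the run shown below could be passed like this (an illustrative sketch; the tchunk value and the exact names of the save/plot/calc/lazy/control variables are assumptions based on the text above and the logs further down):
%env local=True
%env ychunk=10
%env tchunk=1
%env file_exp=SEDNA_DELTA_MONITOR
%env year=2012
%env month=05
%env calc=True
%env save=True
%env plot=False
%env lazy=False
%env control=M_FWC_2D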
%%time
# 'savefig': do we save the output as html or not? Keep it True.
savefig=True
client, cluster, control, catalog_url, month, year, daskreport, outputpath = load.set_control(host)
!mkdir -p $outputpath
!mkdir -p $daskreport
client
local True
using host= irene5316.c-irene.mg1.tgcc.ccc.cea.fr
starting dask cluster on local= True
workers 16 10000000000 rome
local cluster starting
This code is running on irene5316.c-irene.mg1.tgcc.ccc.cea.fr using SEDNA_DELTA_MONITOR file experiment, read from ../lib/SEDNA_DELTA_MONITOR.yaml on year= 2012 on month= 05
outputpath= ../results/SEDNA_DELTA_MONITOR/ daskreport= ../results/dask/6476243irene5316.c-irene.mg1.tgcc.ccc.cea.fr_SEDNA_DELTA_MONITOR_05M_FWC_2D/
CPU times: user 498 ms, sys: 122 ms, total: 620 ms
Wall time: 21.3 s
Client: Client-131319db-1970-11ed-9a5c-080038b93de5
Connection method: Cluster object | Cluster type: distributed.LocalCluster
Dashboard: http://127.0.0.1:8787/status

LocalCluster: 9336943c
Dashboard: http://127.0.0.1:8787/status | Workers: 16
Total threads: 128 | Total memory: 251.06 GiB
Status: running | Using processes: True

Scheduler: Scheduler-102de33a-850e-48fc-88e6-01d03a95c45d
Comm: tcp://127.0.0.1:32907 | Workers: 16
Dashboard: http://127.0.0.1:8787/status | Total threads: 128
Started: Just now | Total memory: 251.06 GiB

Workers (8 threads and 15.69 GiB memory each):

Comm | Dashboard | Nanny | Local directory
---|---|---|---
tcp://127.0.0.1:40222 | http://127.0.0.1:46019/status | tcp://127.0.0.1:46255 | /tmp/dask-worker-space/worker-739mjtm8
tcp://127.0.0.1:44044 | http://127.0.0.1:39190/status | tcp://127.0.0.1:44155 | /tmp/dask-worker-space/worker-4tijkrwd
tcp://127.0.0.1:38170 | http://127.0.0.1:44447/status | tcp://127.0.0.1:39997 | /tmp/dask-worker-space/worker-fmhgwqq7
tcp://127.0.0.1:45688 | http://127.0.0.1:42873/status | tcp://127.0.0.1:43473 | /tmp/dask-worker-space/worker-5ou4yr13
tcp://127.0.0.1:41461 | http://127.0.0.1:41746/status | tcp://127.0.0.1:43341 | /tmp/dask-worker-space/worker-5akl10i3
tcp://127.0.0.1:39906 | http://127.0.0.1:40628/status | tcp://127.0.0.1:46768 | /tmp/dask-worker-space/worker-56n55428
tcp://127.0.0.1:39636 | http://127.0.0.1:35397/status | tcp://127.0.0.1:34843 | /tmp/dask-worker-space/worker-6tsmkmp7
tcp://127.0.0.1:32773 | http://127.0.0.1:42746/status | tcp://127.0.0.1:42402 | /tmp/dask-worker-space/worker-a7rv4bqr
tcp://127.0.0.1:33205 | http://127.0.0.1:40781/status | tcp://127.0.0.1:35527 | /tmp/dask-worker-space/worker-qa37yg85
tcp://127.0.0.1:34269 | http://127.0.0.1:40004/status | tcp://127.0.0.1:45734 | /tmp/dask-worker-space/worker-h402e8ne
tcp://127.0.0.1:34309 | http://127.0.0.1:37738/status | tcp://127.0.0.1:37369 | /tmp/dask-worker-space/worker-qwasv0aq
tcp://127.0.0.1:40698 | http://127.0.0.1:37238/status | tcp://127.0.0.1:43138 | /tmp/dask-worker-space/worker-k67o979a
tcp://127.0.0.1:38534 | http://127.0.0.1:41012/status | tcp://127.0.0.1:35020 | /tmp/dask-worker-space/worker-fqvdp4q9
tcp://127.0.0.1:38175 | http://127.0.0.1:46235/status | tcp://127.0.0.1:46080 | /tmp/dask-worker-space/worker-79592jln
tcp://127.0.0.1:37053 | http://127.0.0.1:41091/status | tcp://127.0.0.1:43412 | /tmp/dask-worker-space/worker-nf584qtr
tcp://127.0.0.1:44612 | http://127.0.0.1:37784/status | tcp://127.0.0.1:35731 | /tmp/dask-worker-space/worker-4s32j9tt
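The client summary above corresponds to a cluster that load.set_control presumably starts when local=True; a minimal sketch, with the worker count and threads taken from that summary:
from dask.distributed import Client, LocalCluster

# Sketch only: 16 worker processes x 8 threads = 128 threads, ~15.69 GiB memory per worker.
cluster = LocalCluster(n_workers=16, threads_per_worker=8)
client = Client(cluster)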
df=load.controlfile(control)
#Take out 'later' tagged computations
#df=df[~df['Value'].str.contains('later')]
df
Value | Inputs | Equation | Zone | Plot | Colourmap | MinMax | Unit | Oldname | Unnamed: 10
---|---|---|---|---|---|---|---|---|---
FWC_2D | gridS.vosaline,param.mask,param.e3t,param.e1te2t | calc.FWC2D_UFUNC(data) | BBFG | maps | Spectral_r | (0,24) | m | S-1 |
Each computation consists of loading the inputs, zooming to the target zone, computing the diagnostic, saving the result, and (optionally) plotting it, as sketched below.
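monitor.auto below drives those steps for every row of df. A minimal sketch of one row's sequence, assembled from the calls that appear in its log (this is not the actual monitor.auto implementation):
# Hypothetical outline of one control-file row, here the 'FWC_2D' row.
data = monitor.optimize_dataset(data)   # 1. optimise the lazily loaded dataset
data = zoom.BBFG(data)                  # 2. zoom to the 'BBFG' zone
data = monitor.optimize_dataset(data)
data = calc.FWC2D_UFUNC(data)           # 3. compute the diagnostic
save.datas(data, plot='maps', path='../nc_results/SEDNA_DELTA_MONITOR/',
           filename='SEDNA_maps_BBFG_FWC_2D')   # 4. save the file used for plotting
# 5. plotting is skipped in this run because plot=False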
%%time
import os
calcswitch = os.environ.get('calc', 'True')
lazy = os.environ.get('lazy', 'False')
loaddata = (df.Inputs != '').any()
print('calcswitch=', calcswitch, 'df.Inputs != nothing', loaddata, 'lazy=', lazy)
data = load.datas(catalog_url, df.Inputs, month, year, daskreport, lazy=lazy) if (calcswitch == 'True' and loaddata) else 0
data
calcswitch= True df.Inputs != nothing True lazy= False ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading ../lib/SEDNA_DELTA_MONITOR.yaml using param_xios reading <bound method DataSourceBase.describe of sources: param_xios: args: combine: nested concat_dim: y urlpath: /ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc xarray_kwargs: compat: override coords: minimal data_vars: minimal parallel: true description: SEDNA NEMO parameters from MPI output nav_lon lat fails driver: intake_xarray.netcdf.NetCDFSource metadata: catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/ > {'name': 'param_xios', 'container': 'xarray', 'plugin': ['netcdf'], 'driver': ['netcdf'], 'description': 'SEDNA NEMO parameters from MPI output nav_lon lat fails', 'direct_access': 'forbid', 'user_parameters': [{'name': 'path', 'description': 'file coordinate', 'type': 'str', 'default': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/MESH/SEDNA_mesh_mask_Tgt_20210423_tsh10m_L1/param'}], 'metadata': {}, 'args': {'urlpath': '/ccc/work/cont003/gen7420/odakatin/CONFIGS/SEDNA/SEDNA-I/SEDNA_Domain_cfg_Tgt_20210423_tsh10m_L1/param_f32/x_*.nc', 'combine': 'nested', 'concat_dim': 'y'}} 0 read gridS ['vosaline'] lazy= False using load_data_xios_kerchunk reading gridS using load_data_xios_kerchunk reading <bound method DataSourceBase.describe of sources: data_xios_kerchunk: args: consolidated: false storage_options: fo: file:////ccc/cont003/home/ra5563/ra5563/catalogue/DELTA/201205/gridS_0[0-5][0-9][0-9].json target_protocol: file urlpath: reference:// description: CREG025 NEMO outputs from different xios server in kerchunk format driver: intake_xarray.xzarr.ZarrSource metadata: catalog_dir: /ccc/work/cont003/gen7420/odakatin/monitor-sedna/notebook/../lib/ > took 20.247822046279907 seconds 0 merging gridS ['vosaline'] param e1te2t will be included in data param e3t will be included in data param nav_lon will be included in data param mask will be included in data param mask2d will be included in data param nav_lat will be included in data ychunk= 10 calldatas_y_rechunk sum_num (13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 13, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 
12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12) start rechunking with (130, 122, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120, 48) end of y_rechunk CPU times: user 20.3 s, sys: 3.04 s, total: 23.3 s Wall time: 45.6 s
<xarray.Dataset> Dimensions: (t: 31, z: 150, y: 6540, x: 6560) Coordinates: * t (t) object 2012-05-01 12:00:00 ... 2012-05-31 12:00:00 * y (y) int64 1 2 3 4 5 6 7 8 ... 6534 6535 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 8 ... 6554 6555 6556 6557 6558 6559 6560 * z (z) int64 1 2 3 4 5 6 7 8 9 ... 143 144 145 146 147 148 149 150 e1te2t (y, x) float64 dask.array<chunksize=(130, 6560), meta=np.ndarray> e3t (z, y, x) float64 dask.array<chunksize=(150, 130, 6560), meta=np.ndarray> nav_lon (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray> mask (z, y, x) bool dask.array<chunksize=(150, 130, 6560), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(130, 6560), meta=np.ndarray> nav_lat (y, x) float32 dask.array<chunksize=(130, 6560), meta=np.ndarray> Data variables: vosaline (t, z, y, x) float32 dask.array<chunksize=(1, 150, 130, 6560), meta=np.ndarray> Attributes: (12/26) CASE: DELTA CONFIG: SEDNA Conventions: CF-1.6 DOMAIN_dimensions_ids: [2, 3] DOMAIN_halo_size_end: [0, 0] DOMAIN_halo_size_start: [0, 0] ... ... nj: 13 output_frequency: 1d start_date: 20090101 timeStamp: 2022-Jan-22 08:58:17 GMT title: ocean T grid variables uuid: a1983733-71c8-4fa1-b637-a1c552373d7f
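For reference, the FWC_2D row of the control table (inputs vosaline, mask, e3t; unit m) describes a vertically integrated freshwater content. The following is only an illustrative sketch of such a computation, assuming a reference salinity of 34.8; it is not the actual calc.FWC2D_UFUNC code:
import xarray as xr

def fwc2d_sketch(ds: xr.Dataset, s_ref: float = 34.8) -> xr.DataArray:
    # Freshwater content in metres: depth integral of (s_ref - S)/s_ref * e3t on masked cells.
    fwc = (((s_ref - ds.vosaline) / s_ref) * ds.e3t).where(ds.mask).sum(dim="z")
    return fwc.rename("FWC2D")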
%%time
monitor.auto(df, data, savefig, daskreport, outputpath, file_exp='SEDNA')
#calc= True #save= True #plot= False
Value='FWC_2D' Zone='BBFG' Plot='maps' cmap='Spectral_r' clabel='m' clim= (0, 24)
outputpath='../results/SEDNA_DELTA_MONITOR/' nc_outputpath='../nc_results/SEDNA_DELTA_MONITOR/'
filename='SEDNA_maps_BBFG_FWC_2D'
data=monitor.optimize_dataset(data)
#2 Zooming Data
data= zoom.BBFG(data)
data=monitor.optimize_dataset(data)
<xarray.Dataset> Dimensions: (t: 31, z: 150, y: 5264, x: 6560) Coordinates: * t (t) object 2012-05-01 12:00:00 ... 2012-05-31 12:00:00 * y (y) int64 1277 1278 1279 1280 1281 ... 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 8 ... 6554 6555 6556 6557 6558 6559 6560 * z (z) int64 1 2 3 4 5 6 7 8 9 ... 143 144 145 146 147 148 149 150 e1te2t (y, x) float64 dask.array<chunksize=(56, 6560), meta=np.ndarray> e3t (z, y, x) float64 dask.array<chunksize=(150, 56, 6560), meta=np.ndarray> nav_lon (y, x) float32 dask.array<chunksize=(56, 6560), meta=np.ndarray> mask (z, y, x) bool dask.array<chunksize=(150, 56, 6560), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(56, 6560), meta=np.ndarray> nav_lat (y, x) float32 dask.array<chunksize=(56, 6560), meta=np.ndarray> Data variables: vosaline (t, z, y, x) float32 dask.array<chunksize=(1, 150, 56, 6560), meta=np.ndarray> Attributes: (12/26) CASE: DELTA CONFIG: SEDNA Conventions: CF-1.6 DOMAIN_dimensions_ids: [2, 3] DOMAIN_halo_size_end: [0, 0] DOMAIN_halo_size_start: [0, 0] ... ... nj: 13 output_frequency: 1d start_date: 20090101 timeStamp: 2022-Jan-22 08:58:17 GMT title: ocean T grid variables uuid: a1983733-71c8-4fa1-b637-a1c552373d7f
#3 Start computing
data= calc.FWC2D_UFUNC(data)
monitor.optimize_dataset(data)
add optimise here once optimise can recognise
<xarray.Dataset> Dimensions: (t: 31, y: 5264, x: 6560) Coordinates: * t (t) object 2012-05-01 12:00:00 ... 2012-05-31 12:00:00 * y (y) int64 1277 1278 1279 1280 1281 ... 6536 6537 6538 6539 6540 * x (x) int64 1 2 3 4 5 6 7 8 ... 6554 6555 6556 6557 6558 6559 6560 e1te2t (y, x) float64 dask.array<chunksize=(56, 6560), meta=np.ndarray> nav_lon (y, x) float32 dask.array<chunksize=(56, 6560), meta=np.ndarray> mask2d (y, x) bool dask.array<chunksize=(56, 6560), meta=np.ndarray> nav_lat (y, x) float32 dask.array<chunksize=(56, 6560), meta=np.ndarray> Data variables: FWC2D (t, y, x) float32 dask.array<chunksize=(1, 56, 6560), meta=np.ndarray>
#4 Saving SEDNA_maps_BBFG_FWC_2D
data=save.datas(data,plot=Plot,path=nc_outputpath,filename=filename)
start saving data
saving data in a file
t (1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1)
0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30
slice(0, 1, None)
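The slice(i, i+1, None) lines in this output come from writing the result one time step at a time. A hedged sketch of such a loop, assuming the nc_outputpath and filename printed above (the per-file layout and names are assumptions, not the actual save.datas code):
import os

outdir = os.path.join(nc_outputpath, filename)
os.makedirs(outdir, exist_ok=True)
for i in range(data.sizes["t"]):
    # Write each daily time step to its own netcdf file.
    data.isel(t=slice(i, i + 1)).to_netcdf(os.path.join(outdir, f"t_{i:02d}.nc"))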
2022-08-11 14:23:02,592 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:37053 (pid=23224) exceeded 99% memory budget. Restarting... 2022-08-11 14:23:03,472 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:37053 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 233, in read n = await stream.read_into(chunk) tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:44310 remote=tcp://127.0.0.1:37053>: Stream is closed 2022-08-11 14:23:03,526 - distributed.nanny - WARNING - Restarting worker
slice(1, 2, None)
2022-08-11 14:27:22,593 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:42125 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 223, in read frames_nbytes = await stream.read_bytes(fmt_size) asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 456, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect handshake = await asyncio.wait_for(comm.read(), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 458, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:42125 after 30 s 2022-08-11 14:27:23,956 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44044 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 223, in read frames_nbytes = await stream.read_bytes(fmt_size) asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 456, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect handshake = await asyncio.wait_for(comm.read(), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 458, in wait_for raise 
exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44044 after 30 s 2022-08-11 14:27:25,159 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:42125 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 223, in read frames_nbytes = await stream.read_bytes(fmt_size) asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 456, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect handshake = await asyncio.wait_for(comm.read(), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 458, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:42125 after 30 s 2022-08-11 14:27:25,167 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44044 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 223, in read frames_nbytes = await stream.read_bytes(fmt_size) asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 456, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect handshake = await asyncio.wait_for(comm.read(), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 458, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44044 after 30 s 2022-08-11 14:27:25,693 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:39906 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 264, in write async def write(self, msg, serializers=None, on_error="message"): asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 418, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 329, in connect await asyncio.wait_for(comm.write(local_info), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 420, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:39906 after 30 s 2022-08-11 14:27:27,231 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44044 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 223, in read frames_nbytes = await stream.read_bytes(fmt_size) asyncio.exceptions.CancelledError During handling of the above exception, another exception occurred: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 456, in wait_for return fut.result() asyncio.exceptions.CancelledError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 328, in connect handshake = await asyncio.wait_for(comm.read(), time_left()) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 458, in wait_for raise exceptions.TimeoutError() from exc asyncio.exceptions.TimeoutError The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await 
retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 333, in connect raise OSError( OSError: Timed out during handshake while connecting to tcp://127.0.0.1:44044 after 30 s
slice(2, 3, None)
2022-08-11 14:27:52,890 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:44044 (pid=23251) exceeded 99% memory budget. Restarting... 2022-08-11 14:27:53,798 - distributed.nanny - WARNING - Restarting worker 2022-08-11 14:27:55,052 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:33205 -> tcp://127.0.0.1:44044 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 971, in _handle_write num_bytes = self.write_to_fd(self._write_buffer.peek(size)) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1148, in write_to_fd return self.socket.send(data) # type: ignore ConnectionResetError: [Errno 104] Connection reset by peer The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1692, in get_data response = await comm.read(deserializers=serializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) local=tcp://127.0.0.1:33205 remote=tcp://127.0.0.1:56370>: ConnectionResetError: [Errno 104] Connection reset by peer 2022-08-11 14:27:56,016 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:39636 -> tcp://127.0.0.1:44044 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 971, in _handle_write num_bytes = self.write_to_fd(self._write_buffer.peek(size)) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1148, in write_to_fd return self.socket.send(data) # type: ignore BrokenPipeError: [Errno 32] Broken pipe The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1692, in get_data response = await comm.read(deserializers=serializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) local=tcp://127.0.0.1:39636 remote=tcp://127.0.0.1:39904>: BrokenPipeError: [Errno 32] Broken pipe 2022-08-11 14:27:56,854 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44044 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 233, in read n = await stream.read_into(chunk) tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", 
line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:41842 remote=tcp://127.0.0.1:44044>: Stream is closed 2022-08-11 14:27:57,113 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44044 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 315, in write raise StreamClosedError() tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 916, in send_recv await comm.write(msg, serializers=serializers, on_error="raise") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 326, in write convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:42142 remote=tcp://127.0.0.1:44044>: Stream is closed 2022-08-11 14:28:04,568 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44044 Traceback (most recent call last): File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 233, in read n = await stream.read_into(chunk) tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:41700 remote=tcp://127.0.0.1:44044>: Stream is closed
slice(3, 4, None)
2022-08-11 14:29:42,791 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:38175 (pid=23212) exceeded 99% memory budget. Restarting... 2022-08-11 14:29:43,799 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:40222 -> tcp://127.0.0.1:38175 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 867, in _read_to_buffer bytes_read = self.read_from_fd(buf) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1140, in read_from_fd return self.socket.recv_into(buf, len(buf)) ConnectionResetError: [Errno 104] Connection reset by peer The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1692, in get_data response = await comm.read(deserializers=serializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed2022-08-11 14:29:43,800 - distributed.nanny - WARNING - Restarting worker _error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) local=tcp://127.0.0.1:40222 remote=tcp://127.0.0.1:49742>: ConnectionResetError: [Errno 104] Connection reset by peer 2022-08-11 14:29:43,807 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:38175 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 223, in read frames_nbytes = await stream.read_bytes(fmt_size) tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:36354 
remote=tcp://127.0.0.1:38175>: Stream is closed 2022-08-11 14:29:44,252 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:34309 -> tcp://127.0.0.1:38175 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 971, in _handle_write num_bytes = self.write_to_fd(self._write_buffer.peek(size)) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1148, in write_to_fd return self.socket.send(data) # type: ignore ConnectionResetError: [Errno 104] Connection reset by peer The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1692, in get_data response = await comm.read(deserializers=serializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) local=tcp://127.0.0.1:34309 remote=tcp://127.0.0.1:37324>: ConnectionResetError: [Errno 104] Connection reset by peer 2022-08-11 14:29:44,979 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:38175 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 233, in read n = await stream.read_into(chunk) tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:35704 remote=tcp://127.0.0.1:38175>: Stream is closed 2022-08-11 14:29:45,672 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:38175 Traceback 
(most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 233, in read n = await stream.read_into(chunk) tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:35944 remote=tcp://127.0.0.1:38175>: Stream is closed 2022-08-11 14:29:47,494 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:38175 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 233, in read n = await stream.read_into(chunk) tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:36342 remote=tcp://127.0.0.1:38175>: Stream is closed 2022-08-11 14:29:56,691 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:42125 (pid=23834) exceeded 99% memory budget. Restarting... 2022-08-11 14:29:57,659 - distributed.nanny - WARNING - Restarting worker 2022-08-11 14:30:09,307 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:42125 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 233, in read n = await stream.read_into(chunk) tornado.iostream.StreamClosedError: Stream is closed The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 144, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:37830 remote=tcp://127.0.0.1:42125>: Stream is closed 2022-08-11 14:30:25,435 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:38534 -> tcp://127.0.0.1:42125 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 971, in _handle_write num_bytes = self.write_to_fd(self._write_buffer.peek(size)) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1148, in write_to_fd return self.socket.send(data) # type: ignore BrokenPipeError: [Errno 32] Broken pipe The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1692, in get_data response = await comm.read(deserializers=serializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File 
"/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) local=tcp://127.0.0.1:38534 remote=tcp://127.0.0.1:48238>: BrokenPipeError: [Errno 32] Broken pipe 2022-08-11 14:30:56,151 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:42125 ConnectionRefusedError: [Errno 111] Connection refused The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 291, in connect comm = await asyncio.wait_for( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 445, in wait_for return fut.result() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 461, in connect convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <distributed.comm.tcp.TCPConnector object at 0x2b6dc9556770>: ConnectionRefusedError: [Errno 111] Connection refused The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 317, in connect raise OSError( OSError: Timed out trying to connect to tcp://127.0.0.1:42125 after 30 s
slice(4, 5, None)
2022-08-11 14:33:23,756 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:41337 (asyncio.exceptions.TimeoutError, then OSError: Timed out during handshake while connecting to tcp://127.0.0.1:41337 after 30 s; full traceback omitted)
slice(5, 6, None) slice(6, 7, None) slice(7, 8, None) slice(8, 9, None) slice(9, 10, None) slice(10, 11, None) slice(11, 12, None)
2022-08-11 14:44:24,091 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:45688 (pid=23231) exceeded 99% memory budget. Restarting...
2022-08-11 14:44:25,067 to 14:44:29,337 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:45688 (repeated from several gathering workers; CommClosedError: Stream is closed)
2022-08-11 14:44:25,091 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:32773 -> tcp://127.0.0.1:45688 (ConnectionResetError: [Errno 104] Connection reset by peer)
2022-08-11 14:44:25,286 - distributed.nanny - WARNING - Restarting worker
2022-08-11 14:44:27,696 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:40222 -> tcp://127.0.0.1:45688 (CommClosedError: Stream is closed)
2022-08-11 14:44:40,590 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:44612 (pid=23209) exceeded 99% memory budget. Restarting...
2022-08-11 14:44:41,568 - distributed.nanny - WARNING - Restarting worker
2022-08-11 14:44:44,611 and 14:44:51,287 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44612 (CommClosedError: Stream is closed)
2022-08-11 14:44:52,925 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:32773 -> tcp://127.0.0.1:44612 (BrokenPipeError: [Errno 32] Broken pipe)
2022-08-11 14:45:27,150 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:44612 (ConnectionRefusedError, then OSError: Timed out trying to connect to tcp://127.0.0.1:44612 after 30 s)
2022-08-11 14:45:40,351 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:40698 (asyncio.exceptions.TimeoutError, then OSError: Timed out during handshake while connecting to tcp://127.0.0.1:40698 after 30 s)
(full tracebacks omitted; all follow the same gather_dep -> get_data_from_worker -> comm.read pattern as above)
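The "Timed out ... after 30 s" connect and handshake errors match distributed's default 30-second connect timeout: while a killed worker is being restarted by its nanny, peers give up on reaching the old address. A hedged sketch of relaxing the comm settings (values are illustrative, and this only softens the symptom; it does not address why the workers run out of memory):

import dask

dask.config.set({
    "distributed.comm.timeouts.connect": "60s",  # default is 30s, as seen in the log
    "distributed.comm.retry.count": 3,           # retry failed worker<->worker comms
})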
slice(12, 13, None)
2022-08-11 14:47:05,962 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:42011 (asyncio.exceptions.TimeoutError, then OSError: Timed out during handshake while connecting to tcp://127.0.0.1:42011 after 30 s; full traceback omitted)
slice(13, 14, None) slice(14, 15, None) slice(15, 16, None) slice(16, 17, None) slice(17, 18, None) slice(18, 19, None)
2022-08-11 14:54:49,191 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39906 (pid=23247) exceeded 99% memory budget. Restarting...
2022-08-11 14:54:50,034 to 14:55:00,060 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:39906 (repeated from several gathering workers; CommClosedError: Stream is closed / ConnectionResetError: [Errno 104] Connection reset by peer)
2022-08-11 14:54:50,039 to 14:54:57,011 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:33205, tcp://127.0.0.1:38170, tcp://127.0.0.1:37924 and tcp://127.0.0.1:39636 -> tcp://127.0.0.1:39906 (CommClosedError: Stream is closed / Connection reset by peer)
2022-08-11 14:54:50,070 - distributed.nanny - WARNING - Restarting worker
2022-08-11 14:55:04,290 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:41461 (pid=23239) exceeded 99% memory budget. Restarting...
2022-08-11 14:55:05,605 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:41461 (ConnectionResetError: [Errno 104] Connection reset by peer; log truncated here)
(full tracebacks omitted; all follow the same gather_dep -> get_data_from_worker -> comm.read pattern as above)
remote=tcp://127.0.0.1:41461>: ConnectionResetError: [Errno 104] Connection reset by peer 2022-08-11 14:55:05,623 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:41461 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 867, in _read_to_buffer bytes_read = self.read_from_fd(buf) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1140, in read_from_fd return self.socket.recv_into(buf, len(buf)) ConnectionResetError: [Errno 104] Connection reset by peer The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2705, in _get_data response = await send_recv( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 918, in send_recv response = await comm.read(deserializers=deserializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) Ephemeral Worker->Worker for gather local=tcp://127.0.0.1:38630 remote=tcp://127.0.0.1:41461>: ConnectionResetError: [Errno 104] Connection reset by peer 2022-08-11 14:55:05,691 - distributed.nanny - WARNING - Restarting worker 2022-08-11 14:55:15,371 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:38534 -> tcp://127.0.0.1:41461 Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 971, in _handle_write num_bytes = self.write_to_fd(self._write_buffer.peek(size)) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/tornado/iostream.py", line 1148, in write_to_fd return self.socket.send(data) # type: ignore BrokenPipeError: [Errno 32] Broken pipe The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1692, in get_data response = await comm.read(deserializers=serializers) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 239, in read convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in 
{obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <TCP (closed) local=tcp://127.0.0.1:38534 remote=tcp://127.0.0.1:48354>: BrokenPipeError: [Errno 32] Broken pipe 2022-08-11 14:55:45,486 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:41461 ConnectionRefusedError: [Errno 111] Connection refused The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 291, in connect comm = await asyncio.wait_for( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/asyncio/tasks.py", line 445, in wait_for return fut.result() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 461, in connect convert_stream_closed_error(self, e) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/tcp.py", line 142, in convert_stream_closed_error raise CommClosedError(f"in {obj}: {exc.__class__.__name__}: {exc}") from exc distributed.comm.core.CommClosedError: in <distributed.comm.tcp.TCPConnector object at 0x2b6ddd352770>: ConnectionRefusedError: [Errno 111] Connection refused The above exception was the direct cause of the following exception: Traceback (most recent call last): File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 1983, in gather_dep response = await get_data_from_worker( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2725, in get_data_from_worker return await retry_operation(_get_data, operation="get_data_from_worker") File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 383, in retry_operation return await retry( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/utils_comm.py", line 368, in retry return await coro() File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/worker.py", line 2702, in _get_data comm = await rpc.connect(worker) File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1371, in connect return await connect_attempt File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/core.py", line 1307, in _connect comm = await connect( File "/ccc/cont003/home/ra5563/ra5563/monitor/lib/python3.10/site-packages/distributed/comm/core.py", line 317, in connect raise OSError( OSError: Timed out trying to connect to tcp://127.0.0.1:41461 after 30 s
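The warnings above come from dask's worker-memory manager: a worker that grows past the configured terminate fraction of its memory limit (reported here as 99% of the ~15.69 GiB per worker) is killed by its nanny and restarted, and every other worker that was gathering data from it then logs a CommClosedError. The sketch below is illustrative only and is not the configuration used by core.load.set_control(); the thresholds printed and the worker counts chosen are assumptions. It shows how the memory thresholds could be inspected, and how the same ~251 GiB node could instead be split into fewer, larger workers so each one has more headroom.
# Illustrative only -- not the configuration used by core.load.set_control().
import dask
from dask.distributed import LocalCluster, Client

# Inspect the current per-worker memory thresholds (fractions of memory_limit).
for key in ("target", "spill", "pause", "terminate"):
    print(key, dask.config.get(f"distributed.worker.memory.{key}"))

# One possible mitigation: fewer workers, each with a larger memory_limit,
# keeping roughly the same total of ~251 GiB and 128 threads on the node.
cluster = LocalCluster(n_workers=8, threads_per_worker=16, memory_limit="31GiB")
client = Client(cluster)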
slice(19, 20, None)
[... repeated CommClosedError traceback frames omitted; each log entry below keeps the timestamp, message, and final exception ...]
2022-08-11 14:57:10,490 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:33205 (pid=23228) exceeded 99% memory budget. Restarting...
2022-08-11 14:57:11,721 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:33205 (CommClosedError: Stream is closed)
2022-08-11 14:57:11,746 - distributed.nanny - WARNING - Restarting worker
2022-08-11 14:57:13,356 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:33205 (CommClosedError: Stream is closed)
2022-08-11 14:57:16,849 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:33205 (CommClosedError: Stream is closed)
2022-08-11 14:57:17,727 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:33205 (CommClosedError: Stream is closed)
2022-08-11 14:57:18,906 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:34303 -> tcp://127.0.0.1:33205 (CommClosedError: Stream is closed)
2022-08-11 14:57:21,771 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:33205 (CommClosedError: Stream is closed)
2022-08-11 14:57:53,260 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:35128 (asyncio.exceptions.TimeoutError -> OSError: Timed out during handshake while connecting to tcp://127.0.0.1:35128 after 30 s)
2022-08-11 14:57:53,264 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:33205 (asyncio.exceptions.TimeoutError -> OSError: Timed out trying to connect to tcp://127.0.0.1:33205 after 30 s)
2022-08-11 14:58:19,620 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:33205 (ConnectionRefusedError: [Errno 111] Connection refused -> OSError: Timed out trying to connect to tcp://127.0.0.1:33205 after 30 s)
slice(20, 21, None) slice(21, 22, None) slice(22, 23, None) slice(23, 24, None) slice(24, 25, None) slice(25, 26, None) slice(26, 27, None) slice(27, 28, None)
[... repeated CommClosedError traceback frames omitted; each log entry below keeps the timestamp, message, and final exception ...]
2022-08-11 15:09:11,889 - distributed.worker_memory - WARNING - Worker tcp://127.0.0.1:39636 (pid=23214) exceeded 99% memory budget. Restarting...
2022-08-11 15:09:13,223 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:39636 (CommClosedError: Stream is closed)
2022-08-11 15:09:13,255 - distributed.nanny - WARNING - Restarting worker
2022-08-11 15:09:16,467 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:32773 -> tcp://127.0.0.1:39636 (CommClosedError: Stream is closed)
2022-08-11 15:09:16,467 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:39636 (CommClosedError: Stream is closed)
2022-08-11 15:09:16,338 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:35139 -> tcp://127.0.0.1:39636 (CommClosedError: Stream is closed)
2022-08-11 15:09:17,313 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:39636 (CommClosedError: Stream is closed)
2022-08-11 15:09:17,593 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:39636 (CommClosedError: ConnectionResetError: [Errno 104] Connection reset by peer)
2022-08-11 15:09:18,091 - distributed.worker - ERROR - failed during get data with tcp://127.0.0.1:34303 -> tcp://127.0.0.1:39636 (CommClosedError: Stream is closed)
2022-08-11 15:09:18,531 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:39636 (CommClosedError: Stream is closed)
2022-08-11 15:09:51,205 - distributed.worker - ERROR - Worker stream died during communication: tcp://127.0.0.1:39636 (ConnectionRefusedError: [Errno 111] Connection refused -> OSError: Timed out trying to connect to tcp://127.0.0.1:39636 after 30 s)
slice(28, 29, None) slice(29, 30, None) slice(30, 31, None) CPU times: user 8min 13s, sys: 1min 47s, total: 10min 1s Wall time: 54min 17s
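Despite the three worker restarts logged above, the cell eventually worked through all slices up to slice(30, 31, None) in roughly 54 minutes of wall time. When workers repeatedly hit the memory budget like this, one common workaround is to request smaller dask chunks when the input files are opened, so that each gather/compute task holds less data in memory. The snippet below is a hypothetical illustration only: the file path, variable, and dimension names are placeholders, not the ones produced by core.load for this experiment.
# Hypothetical example -- path and dimension names are placeholders.
import xarray as xr

ds = xr.open_dataset(
    "path/to/SEDNA_DELTA_gridT.nc",                   # placeholder input file
    chunks={"time_counter": 1, "y": 500, "x": 500},   # smaller dask chunks
)
print(ds.chunks)  # confirm the chunk sizes actually applied to the dataset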