Commit 848bc730 authored by Holger Niemann

add delay into daily upload, fix of upload for <4gb files

parent 9b7cda18
@@ -11,13 +11,15 @@ import h5py as h5
 import numpy as np
 import datetime
 import sys
+from time import sleep
 #from getfoldernames import Searchdatalocation as gfn
 #%% set global parameter
 is_local_test = True # local test on one IRcam PC (not central PC)
 active_PC = [0] # selection of the following camera PCs to loop over
+delayupload=True
+startuploadtime="20:00:00"
 reupload=False
 reason=""
 retry=2
@@ -35,8 +37,7 @@ nuc_parlog=True#False
 nuc=True
 metastream=True
-dates=[[2017,10,11],[2017,10,12],[2017,10,17],[2017,10,18]]
-#date = [2017,9,26]
+dates=[[2017,11,9]]
 ports = ['AEF10', 'AEF11',
          'AEF20', 'AEF21',
@@ -73,6 +74,13 @@ class Tee(object):
     def flush(self) :
         for f in self.files:
             f.flush()
+if delayupload:
+    now=datetime.datetime.now()
+    start=datetime.datetime(now.year,now.month,now.day,int(startuploadtime.split(":")[0]),int(startuploadtime.split(":")[1]),int(startuploadtime.split(":")[2]))
+    sleeptime=(start-now).total_seconds() #seconds
+    if sleeptime<0:
+        sleeptime=1
+    sleep(sleeptime)
 original = sys.stdout
...
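For context, a minimal standalone sketch of the delay logic this hunk adds (the variable names `delayupload` and `startuploadtime` come from the diff; the `seconds_until` helper is hypothetical):

import datetime
from time import sleep

startuploadtime = "20:00:00"  # daily upload start time, HH:MM:SS (as in the diff)

def seconds_until(start_str):
    # Hypothetical helper: seconds from now until today's HH:MM:SS.
    now = datetime.datetime.now()
    h, m, s = (int(x) for x in start_str.split(":"))
    start = datetime.datetime(now.year, now.month, now.day, h, m, s)
    return (start - now).total_seconds()

wait = seconds_until(startuploadtime)
if wait < 0:
    wait = 1  # start time already passed today: begin almost immediately
sleep(wait)

Note that a negative wait collapses to one second, so a script launched after 20:00 starts uploading right away rather than waiting for the next day.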
File added
@@ -11,7 +11,7 @@ import urllib
 import json
 #from getfoldernames import Searchdatalocation as gfn
 import numpy as np
-from binfilesreadingv2 import BinVideoReader as bvreader
+#from binfilesreadingv2 import BinVideoReader as bvreader
 import os
 import datetime as dt
@@ -814,7 +814,7 @@ def writeH5_from_File(stream,filename,key,dimof,idx=None):
     """
     # stream = path.stream
     # dtype = str(data.dtype)
-    filesize=os.stat(filename).stsize
+    filesize=os.stat(filename).st_size
     f5=h5reader.File(filename,'r')
     if filesize<4000000000:
         tmpfile = "archive_"+stream+'_'+str(dimof[0])
@@ -835,9 +835,9 @@ def writeH5_from_File(stream,filename,key,dimof,idx=None):
         times=[]
         limits=[0]
         shapi=f5[key].shape
-        intervall=int(np.shape(dimof)/nrfiles)
-        for i in range(nrfiles-1):
-            limits.append(intervall*i)
+        intervall=int(np.shape(dimof)[0]/nrfiles)
+        for i in range(0,nrfiles-1):
+            limits.append(intervall*(i+1))
             times.append(dimof[limits[i]:limits[i+1]])
         limits.append(np.shape(dimof)[0])
         times.append(dimof[limits[nrfiles-1]:limits[nrfiles]])
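A toy run shows why the changed lines matter: `np.shape(dimof)` is a tuple, so the old `int(np.shape(dimof)/nrfiles)` raises TypeError, and `intervall*i` would have appended the boundary 0 twice, leaving an empty first chunk and an oversized last one. A sketch with illustrative values:

import numpy as np

dimof = np.arange(10)  # stand-in for the timestamp vector
nrfiles = 3
intervall = int(np.shape(dimof)[0] / nrfiles)   # 10 // 3 -> 3

limits = [0]
for i in range(0, nrfiles - 1):
    limits.append(intervall * (i + 1))          # boundaries 3, 6 (old code: 0, 3)
limits.append(np.shape(dimof)[0])               # final boundary 10

times = [dimof[limits[i]:limits[i + 1]] for i in range(nrfiles)]
print(limits)                        # [0, 3, 6, 10]
print([t.tolist() for t in times])   # [[0, 1, 2], [3, 4, 5], [6, 7, 8, 9]]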
@@ -847,8 +847,8 @@ def writeH5_from_File(stream,filename,key,dimof,idx=None):
             tmpfile += ".h5"
             with h5reader.File(tmpfile, 'w') as f:
                 g = f.create_group('data') # requires [row,col,time]
-                g.create_dataset('timestamps', data=list(times[i]), dtype='uint64',compression="gzip")
-                dset=g.create_dataset(stream,shape=(shapi[0],shapi[1],limits[i+1]-limits[i]),dtype='uint16',chunks=(shapi[0],shapi[1],1),compression='gzip')
+                g.create_dataset('timestamps', data=list(times[i]), dtype='uint64')#,compression="gzip")
+                dset=g.create_dataset(stream,shape=(shapi[0],shapi[1],limits[i+1]-limits[i]),dtype='uint16',chunks=(shapi[0],shapi[1],1))#,compression='gzip')
                 for n in range(limits[i+1]-limits[i]):
                     dset[:,:,n]=f5[key][:,:,limits[i]+n]
                 tmpfiles.append(tmpfile)
...
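A minimal sketch of the frame-by-frame HDF5 copy pattern these lines implement, with gzip compression left off as in the change (toy array sizes; `h5py` imported under the alias the file uses):

import numpy as np
import h5py as h5reader

rows, cols, nframes = 4, 4, 5
src = np.random.randint(0, 2**16, size=(rows, cols, nframes)).astype('uint16')

with h5reader.File('archive_demo.h5', 'w') as f:
    g = f.create_group('data')  # requires [row,col,time]
    # One chunk per frame; compression commented out, mirroring the diff.
    dset = g.create_dataset('frames', shape=(rows, cols, nframes),
                            dtype='uint16', chunks=(rows, cols, 1))
    for n in range(nframes):
        dset[:, :, n] = src[:, :, n]  # copy a single frame per iteration

Dropping gzip trades file size for write speed, which is presumably why the compression arguments were commented out for these large video streams.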