diff --git a/Upload_Day_IRdata.py b/Upload_Day_IRdata.py
index 36c40f8f7765623d183d2085f2a1759bce737bd4..968edc6f13f0edb8f028636a9632c29ef7500f37 100644
--- a/Upload_Day_IRdata.py
+++ b/Upload_Day_IRdata.py
@@ -11,13 +11,15 @@ import h5py as h5
 import numpy as np
 import datetime
 import sys
+from time import sleep
 #from getfoldernames import Searchdatalocation as gfn
 
 #%% set global parameter
 is_local_test = True # local test on one IRcam PC (not central PC)
 active_PC = [0] # selection of the following camera PCs to loop over
 
-
+delayupload=True
+startuploadtime="20:00:00"
 reupload=False
 reason=""
 retry=2
@@ -35,8 +37,7 @@ nuc_parlog=True#False
 nuc=True
 metastream=True
 
-dates=[[2017,10,11],[2017,10,12],[2017,10,17],[2017,10,18]]
-#date = [2017,9,26]
+dates=[[2017,11,9]]
 
 ports = ['AEF10', 'AEF11',
          'AEF20', 'AEF21',
@@ -73,7 +74,14 @@ class Tee(object):
     def flush(self) :
         for f in self.files:
             f.flush()
-
+if delayupload:
+    now=datetime.datetime.now()
+    start=datetime.datetime(now.year,now.month,now.day,int(startuploadtime.split(":")[0]),int(startuploadtime.split(":")[1]),int(startuploadtime.split(":")[2]))
+    sleeptime=(start-now).total_seconds() #seconds
+    if sleeptime<0:
+        sleeptime=1
+    sleep(sleeptime)
+
 original = sys.stdout
 
 
diff --git a/__pycache__/uploadingversionIRdata.cpython-35.pyc b/__pycache__/uploadingversionIRdata.cpython-35.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..94d6029b04db48bed5b90d28a17917b67472d7aa
Binary files /dev/null and b/__pycache__/uploadingversionIRdata.cpython-35.pyc differ
diff --git a/uploadingversionIRdata.py b/uploadingversionIRdata.py
index b6d7039aaccde67027c37a43cffdefe4d6a285f4..6a3187c141791814b72898ef5cc04ce5fddbdffc 100644
--- a/uploadingversionIRdata.py
+++ b/uploadingversionIRdata.py
@@ -11,7 +11,7 @@ import urllib
 import json
 #from getfoldernames import Searchdatalocation as gfn
 import numpy as np
-from binfilesreadingv2 import BinVideoReader as bvreader
+#from binfilesreadingv2 import BinVideoReader as bvreader
 import os
 import datetime as dt
 
@@ -814,7 +814,7 @@ def writeH5_from_File(stream,filename,key,dimof,idx=None):
     """
 #    stream = path.stream
 #    dtype = str(data.dtype)
-    filesize=os.stat(filename).stsize
+    filesize=os.stat(filename).st_size
     f5=h5reader.File(filename,'r')
     if filesize<4000000000:
         tmpfile = "archive_"+stream+'_'+str(dimof[0])
@@ -835,9 +835,9 @@ def writeH5_from_File(stream,filename,key,dimof,idx=None):
         times=[]
         limits=[0]
         shapi=f5[key].shape
-        intervall=int(np.shape(dimof)/nrfiles)
-        for i in range(nrfiles-1):
-            limits.append(intervall*i)
+        intervall=int(np.shape(dimof)[0]/nrfiles)
+        for i in range(0,nrfiles-1):
+            limits.append(intervall*(i+1))
             times.append(dimof[limits[i]:limits[i+1]])
         limits.append(np.shape(dimof)[0])
         times.append(dimof[limits[nrfiles-1]:limits[nrfiles]])
@@ -847,8 +847,8 @@ def writeH5_from_File(stream,filename,key,dimof,idx=None):
         tmpfile += ".h5"
         with h5reader.File(tmpfile, 'w') as f:
             g = f.create_group('data')  # requires [row,col,time]
-            g.create_dataset('timestamps', data=list(times[i]), dtype='uint64',compression="gzip")
-            dset=g.create_dataset(stream,shape=(shapi[0],shapi[1],limits[i+1]-limits[i]),dtype='uint16',chunks=(shapi[0],shapi[1],1),compression='gzip')
+            g.create_dataset('timestamps', data=list(times[i]), dtype='uint64')#,compression="gzip")
+            dset=g.create_dataset(stream,shape=(shapi[0],shapi[1],limits[i+1]-limits[i]),dtype='uint16',chunks=(shapi[0],shapi[1],1))#,compression='gzip')
             for n in range(limits[i+1]-limits[i]):
                 dset[:,:,n]=f5[key][:,:,limits[i]+n]
             tmpfiles.append(tmpfile)
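
Note on the Upload_Day_IRdata.py change: the new delayupload block computes the seconds from now until startuploadtime on the current day and sleeps for that long before the upload starts; if that time has already passed, it sleeps for one second and proceeds immediately. A minimal standalone sketch of the same logic, assuming the patch's "HH:MM:SS" format (the function name wait_until is illustrative, not part of the patch):

import datetime
from time import sleep

def wait_until(starttime="20:00:00"):
    # Block until the given HH:MM:SS time today; if that time has
    # already passed, wait only 1 second and return (as in the patch).
    now = datetime.datetime.now()
    h, m, s = (int(x) for x in starttime.split(":"))
    start = datetime.datetime(now.year, now.month, now.day, h, m, s)
    sleeptime = (start - now).total_seconds()
    if sleeptime < 0:
        sleeptime = 1
    sleep(sleeptime)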
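
Note on the writeH5_from_File fixes: os.stat(filename).stsize was a typo for the real attribute st_size, and np.shape(dimof) returns a tuple, so dividing it by nrfiles raised a TypeError; indexing with [0] yields the timestamp count. The old loop also appended intervall*i, which starts at 0 and duplicates the first boundary; intervall*(i+1) produces the intended cut points. A small worked example of the corrected splitting logic, with illustrative values:

import numpy as np

dimof = np.arange(10)   # stand-in for 10 frame timestamps
nrfiles = 3             # number of temporary HDF5 files to split into

intervall = int(np.shape(dimof)[0] / nrfiles)   # int(10/3) = 3
limits = [0]
times = []
for i in range(0, nrfiles - 1):
    limits.append(intervall * (i + 1))          # boundaries 3, 6
    times.append(dimof[limits[i]:limits[i + 1]])
limits.append(np.shape(dimof)[0])               # final boundary 10
times.append(dimof[limits[nrfiles - 1]:limits[nrfiles]])

# limits == [0, 3, 6, 10]; times holds the slices [0:3], [3:6], [6:10],
# so every timestamp lands in exactly one temporary file.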
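
Note on the dataset-creation change in the same function: compression="gzip" is commented out of both create_dataset calls, and each video frame is copied into a dataset chunked as (rows, cols, 1), i.e. one chunk per frame along the time axis. A minimal sketch of that write pattern, assuming h5py and illustrative file/dataset names:

import numpy as np
import h5py

frames = np.random.randint(0, 2**16, size=(64, 80, 5), dtype=np.uint16)
rows, cols, nframes = frames.shape

with h5py.File("archive_example.h5", "w") as f:
    g = f.create_group("data")
    # one chunk per frame; gzip disabled, trading file size for write speed
    dset = g.create_dataset("video", shape=(rows, cols, nframes),
                            dtype="uint16", chunks=(rows, cols, 1))
    for n in range(nframes):
        dset[:, :, n] = frames[:, :, n]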