Commit 2c9f3733 authored by Stephan Philips

small fixes

parent 9fb3da48
@@ -18,6 +18,7 @@ In this document the set up, creation, loading and browsing of datasets is discussed
- [ ] index database for fast searches + add keyword feature
- [ ] GUI to display data
- [ ] possible future concept :: push local to external database
- [ ] feature : also support start and stop in slicing (e.g. ds.m1[:, 5:10:20] --> slices the dataset and its setpoints)
# Set up
To set up the connection to the server, there are two options:
@@ -114,7 +114,7 @@ class buffer_reference:
return avg_lambda
@staticmethod
def slice_lambda(*args):
def slice_lambda(args):
def slice_lambda(data):
return data[tuple(args)]
return slice_lambda
\ No newline at end of file
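The change above makes `slice_lambda` take the index collection as a single argument instead of unpacking `*args`, which matches how it is called further down with a prebuilt list of slices (`slice_lambda(idx)`). A minimal self-contained sketch of the same closure pattern, with illustrative names rather than the library's own:

```python
import numpy as np

def make_slice_lambda(args):
    # capture a prebuilt index list and apply it lazily to whatever buffer is passed in
    def apply_slice(data):
        return data[tuple(args)]
    return apply_slice

buf = np.arange(12).reshape(3, 4)
take_row_1 = make_slice_lambda([1, slice(None)])   # equivalent to buf[1, :]
print(take_row_1(buf))                             # [4 5 6 7]
```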
from core_tools.data.SQL.SQL_database_mgr import SQL_database_manager
from core_tools.data.ds.data_set_raw import data_set_raw
from core_tools.data.ds.data_set_DataMgr import m_param_origanizer, dataset_data_description
import datetime
import time
import string
import json
def create_new_data_set(experiment_name, *m_params):
'''
@@ -39,9 +41,10 @@ class data_set_desciptor(object):
return getattr(getattr(obj,"_data_set__data_set_raw"), self.var)
class data_set:
run_id = data_set_desciptor('exp_id')
exp_id = data_set_desciptor('exp_id')
running = data_set_desciptor('uploaded_complete')
dbname = data_set_desciptor('dbname')
table_name = data_set_desciptor('SQL_table_name')
name = data_set_desciptor('exp_name')
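The attributes above are Python descriptors: each `data_set_desciptor` instance forwards attribute access on a `data_set` to a field of the underlying raw dataset (the getter is shown just under the hunk header). A minimal sketch of the same pattern, using hypothetical names and assuming the raw object is stored on the instance:

```python
class raw_field_descriptor:
    # forwards attribute access to a field on the wrapped raw object
    def __init__(self, var):
        self.var = var

    def __get__(self, obj, objtype=None):
        return getattr(obj._raw, self.var)

class demo_raw:
    exp_id = 42

class demo_data_set:
    run_id = raw_field_descriptor('exp_id')   # alias: run_id and exp_id read the same field
    exp_id = raw_field_descriptor('exp_id')

    def __init__(self, raw):
        self._raw = raw

print(demo_data_set(demo_raw()).run_id)       # 42
```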
@@ -65,12 +68,42 @@ class data_set:
def __init__(self, ds_raw):
self.id = None
self.__data_set_raw = ds_raw
self.__property_managment_list = []
print(ds_raw)
self.__repr_attr_overview = []
self.__init_properties(m_param_origanizer(ds_raw.measurement_parameters_raw))
self.last_commit = time.time()
def __init_properties(self):
pass
def __init_properties(self, data_set_content):
'''
populates the dataset with the measured parameters in the raw dataset
Args:
data_set_content (m_param_origanizer) : m_param_raw objects in their management object
'''
m_id = data_set_content.get_m_param_id()
for i in range(len(m_id)): #this is not pretty.
n_sets = len(data_set_content[m_id[i]])
repr_attr_overview = []
for j in range(n_sets):
ds_descript = dataset_data_description('', data_set_content.get(m_id[i], j), data_set_content)
name = 'm' + str(i+1) + string.ascii_lowercase[j]
setattr(self, name, ds_descript)
if j == 0:
setattr(self, 'm' + str(i+1), ds_descript)
if j == 0 and n_sets==1: #consistent printing
repr_attr_overview += [('m' + str(i+1), ds_descript)]
ds_descript.name = 'm' + str(i+1)
else:
repr_attr_overview += [(name, ds_descript)]
ds_descript.name = name
self.__repr_attr_overview += [repr_attr_overview]
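The loop above generates one attribute per measured parameter and per set: `m1`, `m2`, ... for the parameters, and `m1a`, `m1b`, ... when a parameter has several sets, with `m1` kept as an alias for the first set. A small sketch of just the naming logic, using plain strings instead of `dataset_data_description` objects:

```python
import string

def attribute_names(n_params, sets_per_param):
    # reproduce the m<i><letter> naming used in __init_properties
    names = []
    for i in range(n_params):
        for j in range(sets_per_param[i]):
            names.append('m' + str(i + 1) + string.ascii_lowercase[j])
            if j == 0:
                names.append('m' + str(i + 1))   # alias for the first set
    return names

print(attribute_names(2, [1, 2]))
# ['m1a', 'm1', 'm2a', 'm2', 'm2b']
```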
def add_metadata(self, metadata):
pass
@@ -113,4 +146,18 @@ class data_set:
self.__data_set_raw.sync_buffers()
SQL_mgr = SQL_database_manager()
SQL_mgr.update_write_cursors(self.__data_set_raw)
\ No newline at end of file
SQL_mgr.update_write_cursors(self.__data_set_raw)
def __repr__(self):
output_print = "DataSet :: {}\n\nid = {}\nTrueID = 1225565471200\n\n".format(self.name, self.run_id)
output_print += "| idn | label | unit | size |\n"
output_print += "---------------------------------------------------------------------------\n"
for i in self.__repr_attr_overview:
for j in i:
output_print += j[1].__repr__()
output_print += "\n"
output_print += "database : \n".format("self.table_name")
output_print += "set_up : \n".format(self.project)
output_print += "project : \n".format(self.set_up)
output_print += "sample_name : \n".format(self.sample_name)
return output_print
\ No newline at end of file
@@ -153,8 +153,20 @@ class dataset_data_description():
'''
take the ith slice of dimension i
'''
print(dim, i)
return self
if isinstance(dim, str):
dim = list(string.ascii_lowercase).index(dim) - 23
if dim >= self.ndim:
raise ValueError("you are trying to slice a dimension that does not exist")
idx = [slice(None)]*self.ndim
idx[dim] = i
raw_data_org_copy = copy.copy(self.__raw_data_org)
raw_data_cpy = raw_data_org_copy.get(self.__raw_data.param_id, self.__raw_data.nth_set)
raw_data_cpy.dependency.pop(dim)
raw_data_cpy.data_buffer.buffer_lambda = raw_data_cpy.data_buffer.slice_lambda(idx)
return dataset_data_description(self.name, raw_data_cpy, raw_data_org_copy)
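The string-to-axis conversion above relies on `string.ascii_lowercase.index(dim) - 23`, which maps 'x' to 0, 'y' to 1 and 'z' to 2; the selected axis is then fixed to index `i` while every other axis keeps a full slice. A stand-alone numpy sketch of that indexing step, leaving out the dataset bookkeeping around it:

```python
import string
import numpy as np

def slice_axis(data, dim, i):
    # accept 'x'/'y'/'z' or an integer axis, then take the i-th slice of that axis
    if isinstance(dim, str):
        dim = string.ascii_lowercase.index(dim) - 23   # 'x' -> 0, 'y' -> 1, 'z' -> 2
    if dim >= data.ndim:
        raise ValueError("you are trying to slice a dimension that does not exist")
    idx = [slice(None)] * data.ndim
    idx[dim] = i
    return data[tuple(idx)]

data = np.arange(6).reshape(2, 3)
print(slice_axis(data, 'x', 0))   # first row   -> [0 1 2]
print(slice_axis(data, 'y', 1))   # second column -> [1 4]
```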
def __getitem__(self, args):
@@ -165,7 +177,7 @@ class dataset_data_description():
if slices is None:
return self
return self.slice(slices[0], slices[1])[tuple(args[len(args)-slices[0]-1:])]
return self.slice(slices[0], slices[1])[tuple(args[slices[0]+1:])]
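The corrected index arithmetic above (`args[slices[0]+1:]` instead of the old length-based expression) forwards the part of the index that comes after the first concrete position, so a 2D request such as `ds.m1a[0, :]` becomes "slice axis 0 at index 0, then apply the remaining `[:]`". A numpy-only illustration of that two-step reduction for the 2D case; the helper below only mimics the idea and is not the class's actual code:

```python
import numpy as np

def getitem_2d(data, args):
    # find the first concrete (integer) index; full slices are left alone
    first = next((k for k, a in enumerate(args) if not isinstance(a, slice)), None)
    if first is None:
        return data
    reduced = np.take(data, args[first], axis=first)   # same effect as slice(first, args[first])
    return reduced[tuple(args[first + 1:])]            # forward the remainder of the index

data = np.arange(6).reshape(2, 3)
print(getitem_2d(data, (0, slice(None))))   # row 0 -> [0 1 2]
print(getitem_2d(data, (slice(None), 1)))   # col 1 -> [1 4]
```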
def __repr__(self):
output_print = ""
@@ -179,6 +191,9 @@ class dataset_data_description():
return output_print
class data_set_property_intializer():
'''
mock-up of the dataset class for development purposes; do not use this class.
'''
def __init__(self, m_params):
self.__repr_attr_overview = []
# m_meas_id's
@@ -231,8 +246,8 @@ if __name__ == '__main__':
data2 = buffer_reference(np.zeros([100, 100, 10]))
data3 = buffer_reference(np.zeros([10]))
data4 = buffer_reference(np.zeros([100,100]))
data4.buffer[0,:] = -5
data4.buffer[:,0] = 5
data1.buffer[0,:] = -5
data1.buffer[:,0] = 5
a = m_param_raw(param_id=1636274596872, nth_set=0, nth_dim=-1, param_id_m_param=1636274596872, setpoint=False, setpoint_local=False, name_gobal='test', name='chan_1', label='keithley 1', unit='pA', dependency=[1635967634696, 1635967635080], shape='[100, 100]', size=100000, oid=16478, data_buffer=data1)
@@ -247,12 +262,15 @@ if __name__ == '__main__':
ds = data_set_property_intializer(l)
print(ds)
print(ds.m1a())
# print(ds.m1a.label)
# print(ds.m1b.unit)
# print(ds.m1b.label)
print(ds.m1a.average('x'))
print(ds.m1a[0,:])
# print(ds.m1a.average('x'))
print(ds.m1a[:,0]())
print(ds.m1a[0,:]())
print(ds.m1a[:,:]())
print(ds.m1a.slice('x', 0)())
# print(ds.m1a[:,0])
# print(ds.m1a.y())
# print(ds.m1a.z())
\ No newline at end of file
from core_tools.data.SQL.connector import sample_info
from core_tools.data.SQL.connector import sample_info, SQL_conn_info
from core_tools.data.SQL.buffer_writer import buffer_reference
from dataclasses import dataclass, field
import copy
@@ -11,6 +11,8 @@ class data_set_raw:
set_up : str = field(default_factory=lambda: sample_info.set_up)
project : str = field(default_factory=lambda: sample_info.project)
sample : str = field(default_factory=lambda: sample_info.sample)
database : str = field(default_factory=lambda: SQL_conn_info.dbname)
SQL_datatable : str = None
measurement_parameters : list = field(default_factory=lambda: [])
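The two added fields pull their defaults from the globally configured connection and sample info at instantiation time, which is why they use `default_factory` lambdas rather than plain defaults (a plain default would be frozen at class-definition time). A minimal stand-alone sketch of that pattern, with a hypothetical config object in place of `SQL_conn_info`:

```python
from dataclasses import dataclass, field

class _conn_info:                 # stand-in for SQL_conn_info
    dbname = 'measurements'

@dataclass
class demo_raw:
    # evaluated when an instance is created, so later changes to _conn_info are picked up
    database: str = field(default_factory=lambda: _conn_info.dbname)
    SQL_datatable: str = None

print(demo_raw().database)        # 'measurements'
_conn_info.dbname = 'other_db'
print(demo_raw().database)        # 'other_db'
```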
@@ -232,5 +232,6 @@ if __name__ == '__main__':
# print('results', i ,j, z)
ds.add_result((a1, i), (a2, j), (m_param, z))
print(ds.dataset)
t1 =time.time()
print(t1-t0)
\ No newline at end of file