diff --git a/scripts/_LDRD_Kafka.py b/scripts/_LDRD_Kafka.py
index f460e2c..a777bc2 100644
--- a/scripts/_LDRD_Kafka.py
+++ b/scripts/_LDRD_Kafka.py
@@ -101,7 +101,7 @@ def _kafka_process():

 class dic_to_inputs():
     def __init__(self, parameters_dict, parameters_list):
-        """ Turn the input dict into class methods
+        """ Turn the input dict into class attributes

         Args:
             parameters_dict (dict): dict read from excel spreadsheet
@@ -122,14 +122,14 @@ def __init__(self, parameters_dict, parameters_list):
 class xlsx_to_inputs():
     def __init__(self, parameters_list, xlsx_fn, sheet_name='inputs', is_kafka=False):
         """ Read the excel spreadsheet according to the sheet name into a dict
-            Turn the dict read from excel into class methods
+            Turn the dict read from excel into class attributes

         Args:
             parameters_list (list): list for namespace defined above
             xlsx_fn (str): full path of excel file
             sheet_name (str, optional): sheet name of the excel file. Defaults to 'inputs'.
             is_kafka (bool, optional): if True, the namespace for the variables processed
-                            in Kafka will be turned into class methods. Defaults to False.
+                            in Kafka will be turned into class attributes. Defaults to False.
         """
         self.parameters_list = parameters_list
         self.from_xlsx = xlsx_fn
@@ -294,7 +294,7 @@ def macro_agent(self, qserver_process, RM, check_target=False, is_1st=False):
             print(f'\nReached the target, stop iteration, stop all pumps, and wash the loop.\n')

             ### Stop all infusing pumps and wash loop
-            sq.wash_tube_queue2(qin.pump_list, qin.wash_tube, 'ul/min',
+            sq.wash_tube_queue2(qin.pump_list, qin.if_wash, qin.wash_loop, 'ul/min',
                                 zmq_control_addr=qin.zmq_control_addr[0],
                                 zmq_info_addr=qin.zmq_info_addr[0])
@@ -317,7 +317,7 @@ def macro_agent(self, qserver_process, RM, check_target=False, is_1st=False):
         if len(self.agent.table) < 2:
             acq_func = "qr"
         else:
-            acq_func = "qei"
+            acq_func = "qem"

         new_points = self.agent.ask(acq_func, n=1)
@@ -1083,10 +1083,10 @@ def macro_17_add_queue(self, stream_name, qserver_process, RM):
             print('*** qserver aborted due to too many bad scans, please check setup ***\n')

             ### Stop all infusing pumps and wash loop
-            sq.wash_tube_queue2(qin.pump_list, qin.wash_tube, 'ul/min',
+            sq.wash_tube_queue2(qin.pump_list, qin.if_wash, qin.wash_loop, 'ul/min',
                                 zmq_control_addr=qin.zmq_control_addr[0],
                                 zmq_info_addr=qin.zmq_info_addr[0])
-
+
             RM.queue_stop()

         elif (len(self.good_data) <= 2) and (self.inputs.use_good_bad[0]):
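Editor's note on the hunks above: the docstring fix ("class methods" -> "class attributes") matches what dic_to_inputs and xlsx_to_inputs actually do, namely turn each key of the parameters dict into a plain attribute on an inputs namespace. A minimal sketch of that pattern, assuming a setattr-based loader; the names _Namespace and dict_to_attributes are illustrative stand-ins, not the repo's real API:

    # Hypothetical stand-in for the dict-to-attributes pattern the corrected
    # docstrings describe; the real xlsx_to_inputs first reads its dict from
    # an excel sheet before this step.
    class _Namespace:
        pass

    def dict_to_attributes(parameters_dict):
        ns = _Namespace()
        for key, value in parameters_dict.items():
            setattr(ns, key, value)  # an attribute, not a method: ns.key == value
        return ns

    inputs = dict_to_attributes({'pump_list': ['pump1', 'pump2'], 'post_dilute': [True, 2.0]})
    print(inputs.pump_list)  # ['pump1', 'pump2']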
diff --git a/scripts/_data_export.py b/scripts/_data_export.py
index a289a9d..db65498 100644
--- a/scripts/_data_export.py
+++ b/scripts/_data_export.py
@@ -6,7 +6,8 @@
 import os
 import json
 import importlib
-importlib.import_module("_data_export")
+# importlib.import_module("_data_analysis")
+from _data_analysis import *

 """ This module provides functions for data export. Usually imported as de.
@@ -63,7 +64,7 @@ def _fn_generator(uid, beamline_acronym='xpd'):
     date, ttime = _readable_time(metadata_dic['time'])
     full_uid = metadata_dic['uid']
     fn = f'{sample_type}_{date}-{ttime}_{full_uid[0:8]}'
     return fn


 ## Auto generate sample name with given prefix and infuse_rate
diff --git a/scripts/_synthesis_queue_RM.py b/scripts/_synthesis_queue_RM.py
index 350f67f..d6500a2 100644
--- a/scripts/_synthesis_queue_RM.py
+++ b/scripts/_synthesis_queue_RM.py
@@ -163,13 +163,13 @@ def synthesis_queue_xlsx(parameter_obj):

-        ## 4.0 Configure area detector in Qserver
-        if det1 == 'pe1c' or det1 == 'pe2c':
-            scanplan = BPlan('configure_area_det',
-                             det=det1,
-                             exposure=1,
-                             acq_time=det1_frame_rate)
-            RM.item_add(scanplan, pos=pos)
+        # ## 4.0 Configure area detector in Qserver
+        # if det1 == 'pe1c' or det1 == 'pe2c':
+        #     scanplan = BPlan('configure_area_det',
+        #                      det=det1,
+        #                      exposure=1,
+        #                      acq_time=det1_frame_rate)
+        #     RM.item_add(scanplan, pos=pos)

         ## 4-1. Take a fluorescence peak to check reaction
diff --git a/scripts/inputs_qserver_kafka_v2.xlsx b/scripts/inputs_qserver_kafka_v2.xlsx
index c1980b1..24b7cdd 100644
Binary files a/scripts/inputs_qserver_kafka_v2.xlsx and b/scripts/inputs_qserver_kafka_v2.xlsx differ
diff --git a/scripts/kafka_consumer_iterate_XPD_v2.py b/scripts/kafka_consumer_iterate_XPD_v2.py
index fe4cc27..edf813e 100644
--- a/scripts/kafka_consumer_iterate_XPD_v2.py
+++ b/scripts/kafka_consumer_iterate_XPD_v2.py
@@ -39,11 +39,11 @@
 xlsx_fn = '/home/xf28id2/.ipython/profile_collection_ldrd20-31/scripts/inputs_qserver_kafka_v2.xlsx'

 ## Input variables for Qserver, reading from xlsx_fn by given sheet name
-qserver_process = LK.xlsx_to_inputs(LK._qserver_inputs(), xlsx_fn=xlsx_fn, sheet_name='qserver_test')
+qserver_process = LK.xlsx_to_inputs(LK._qserver_inputs(), xlsx_fn=xlsx_fn, sheet_name='qserver_XPD')
 qin = qserver_process.inputs

 ## Input variables for Kafka, reading from xlsx_fn by given sheet name
-kafka_process = LK.xlsx_to_inputs(LK._kafka_inputs(), xlsx_fn=xlsx_fn, sheet_name='kafka_test', is_kafka=True)
+kafka_process = LK.xlsx_to_inputs(LK._kafka_inputs(), xlsx_fn=xlsx_fn, sheet_name='kafka_process', is_kafka=True)
 kin = kafka_process.inputs

 ## Define RE Manager API as RM
@@ -54,9 +54,11 @@
 first_points = kafka_process.macro_agent(qserver_process, RM, check_target=False, is_1st=True)
 rate_list = kafka_process.auto_rate_list(qin.pump_list, first_points, kin.fix_Br_ratio)
 if kin.post_dilute[0]:
-    rate_list.append(sum(rate_list)*kin.post_dilute[1])
+    sum_active = sum(rate_list)
+    rate_list.append(sum_active/9)  ## append PF rate
+    rate_list.append(sum_active*kin.post_dilute[1])  ## append toluene rate
 qin.infuse_rates = rate_list
-
+print(f'\n{rate_list = }\n')

 ## Import Qserver parameters to RE Manager
 sq.synthesis_queue_xlsx(qserver_process)
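Editor's note on the post_dilute hunk above: the single appended dilution rate becomes two, both computed from one sum of the active precursor rates, a PF rate at one ninth of that sum and a toluene rate scaled by kin.post_dilute[1]. A worked example with made-up numbers (illustrative only, not experimental values):

    rate_list = [20.0, 40.0, 30.0]  # uL/min for the active precursor pumps
    post_dilute = [True, 2.0]       # [enabled flag, toluene multiplier]
    if post_dilute[0]:
        sum_active = sum(rate_list)                    # 90.0; sum once, before appending
        rate_list.append(sum_active / 9)               # PF rate: 10.0
        rate_list.append(sum_active * post_dilute[1])  # toluene rate: 180.0
    print(rate_list)  # [20.0, 40.0, 30.0, 10.0, 180.0]

Capturing sum_active before appending matters: summing rate_list again after the first append would fold the PF rate into the toluene calculation.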
units="uL/min", search_domain=(20, 80)), # DOF(description="TOABr", name="infusion_rate_Br", units="uL/min", search_domain=(50, 200)), - DOF(description="ZnI2", name="infusion_rate_I2", units="uL/min", search_domain=(0, 250)), - DOF(description="ZnCl2", name="infusion_rate_Cl", units="uL/min", search_domain=(0, 250)), - DOF(description="OAm_Tol", name="infusion_rate_OAm", units="uL/min", search_domain=(0, 30)), + DOF(description="ZnI2", name="infusion_rate_I2", units="uL/min", search_domain=(10, 190)), + DOF(description="ZnCl2", name="infusion_rate_Cl", units="uL/min", search_domain=(10, 190)), + DOF(description="OAm_Tol", name="infusion_rate_OAm", units="uL/min", search_domain=(0, 70)), ] else: @@ -43,7 +43,7 @@ def build_agent(peak_target=660, peak_tolerance=5, size_target=6, agent_data_pat ratio_down = 1-(510-peak_down)*0.99/110 objectives = [ - Objective(description="Peak emission", name="Peak", target=(peak_down, peak_up), weight=100, max_noise=0.25), + Objective(description="Peak emission", name="Peak", target=(peak_down, peak_up), weight=100., max_noise=0.25), Objective(description="Peak width", name="FWHM", target="min", transform="log", weight=5., max_noise=0.25), Objective(description="Quantum yield", name="PLQY", target="max", transform="log", weight=1., max_noise=0.25), # Objective(description="Particle size", name="Br_size", target=(size_target-1.5, size_target+1.5), transform="log", weight=0.1, max_noise=0.25), @@ -72,7 +72,7 @@ def build_agent(peak_target=660, peak_tolerance=5, size_target=6, agent_data_pat agent.dofs.infusion_rate_Cl.deactivate() agent.dofs.infusion_rate_Cl.device.put(0) - elif peak_target < 500: + elif peak_target < 510: agent.dofs.infusion_rate_I2.deactivate() agent.dofs.infusion_rate_I2.device.put(0)