diff --git a/scripts/_pdf_calculator.py b/scripts/_pdf_calculator.py
index b6a79ab..b534814 100644
--- a/scripts/_pdf_calculator.py
+++ b/scripts/_pdf_calculator.py
@@ -20,7 +20,7 @@ def _no_oxidation_cif(cif_file):
 
 
-def _pdffit2_CsPbX3(gr_data, cif_list, qmax=18, qdamp=0.031, qbroad=0.032, fix_APD=True):
+def _pdffit2_CsPbX3(gr_data, cif_list, qmax=18, qdamp=0.031, qbroad=0.032, fix_APD=True, toler=0.000001):
 
     # Initialize the CifParser with the path to your .cif file
     # Parse the .cif file
@@ -58,7 +58,7 @@ def _pdffit2_CsPbX3(gr_data, cif_list, qmax=18, qdamp=0.031, qbroad=0.032, fix_A
 
     # Refine
     pf.pdfrange(1, 2.5, 60)
-    pf.refine()
+    pf.refine(toler=toler)
 
     phase_fraction = pf.phase_fractions()['mass']
diff --git a/scripts/_synthesis_queue_RM.py b/scripts/_synthesis_queue_RM.py
index 330e42b..2e266da 100644
--- a/scripts/_synthesis_queue_RM.py
+++ b/scripts/_synthesis_queue_RM.py
@@ -126,7 +126,7 @@ def synthesis_queue(
 
             flowplan = BPlan('start_group_infuse', [pump_list[-1]], [rate_list[i][-1]])
             RM.item_add(flowplan, pos=pos)
 
-            restplan = BPlan('sleep_sec_q', 60)
+            restplan = BPlan('sleep_sec_q', 30)
             RM.item_add(restplan, pos=pos)
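Note (illustrative, not part of the patch): the new toler keyword on _pdffit2_CsPbX3 is forwarded to pdffit2's refine(), so callers can trade convergence precision for speed; the kafka consumers below pass toler=0.001 while the helper's default stays at 1e-6. A minimal sketch of the call pattern follows, assuming diffpy.pdffit2 and diffpy.structure are installed; the file names, the single lattice constraint, and quick_phase_fractions itself are placeholders rather than code from this repository:

    # Sketch only -- placeholder names, not the helper in _pdf_calculator.py.
    from diffpy.pdffit2 import PdfFit
    from diffpy.structure import loadStructure

    def quick_phase_fractions(gr_file, cif_file, qmax=18, qdamp=0.031, toler=1e-6):
        pf = PdfFit()
        pf.read_data(gr_file, 'X', qmax, qdamp)      # X-ray G(r) data set
        pf.add_structure(loadStructure(cif_file))    # CIF -> diffpy.structure -> pdffit2 phase
        pf.constrain(pf.lat(1), '@1')                # refine lattice a as fit parameter 1
        pf.setpar(1, pf.getvar(pf.lat(1)))
        pf.pdfrange(1, 2.5, 60)                      # same r window as the helper above
        pf.refine(toler=toler)                       # larger toler -> earlier stop, coarser fit
        return pf.phase_fractions()['mass']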
diff --git a/scripts/kafka_consumer_iterate_RM.py b/scripts/kafka_consumer_iterate_RM.py
index decaf8b..3e618fe 100644
--- a/scripts/kafka_consumer_iterate_RM.py
+++ b/scripts/kafka_consumer_iterate_RM.py
@@ -135,7 +135,7 @@ agent = build_agen2(peak_target=peak_target)
 
 
-fitting_pdf = True
+fitting_pdf = False
 if fitting_pdf:
     pdf_cif_dir = '/home/xf28id2/Documents/ChengHung/pdffit2_example/CsPbBr3/'
     cif_list = [os.path.join(pdf_cif_dir, 'CsPbBr3_Orthorhombic.cif')]
@@ -253,10 +253,10 @@ def print_message(consumer, doctype, doc,
             ## obtain phase fraction & particle size from g(r)
             if 'scattering' in stream_list:
                 if fitting_pdf:
-                    phase_fraction, particel_size = pc._pdffit2_CsPbX3(gr_data, cif_list, qmax=20, qdamp=0.031, qbroad=0.032)
+                    phase_fraction, particel_size = pc._pdffit2_CsPbX3(gr_data, cif_list, qmax=20, qdamp=0.031, qbroad=0.032, fix_APD=False, toler=0.001)
                     pdf_property={'Br_ratio': phase_fraction[0], 'Br_size':particel_size[0]}
                 else:
-                    pdf_property={'Br_ratio': None, 'Br_size':None}
+                    pdf_property={'Br_ratio': np.nan, 'Br_size': np.nan}
                 ## remove 'scattering' from stream_list to avoid redundant work in next for loop
                 stream_list.remove('scattering')
@@ -431,23 +431,26 @@ def print_message(consumer, doctype, doc,
                     agent_iteration.append(True)
 
             # TODO: remove the fllowing 3 lines if no error reported
-            # else:
-            #     plqy_dic = None
-            #     optical_property = None
-
-            ## Save fitting data
-            print(f'\nFitting function: {f_fit}\n')
-            ff={'fit_function': f_fit, 'curve_fit': popt}
-            de.dic_to_csv_for_stream(saving_path, qepro_dic, metadata_dic, stream_name=stream_name, fitting=ff, plqy_dic=plqy_dic)
-            print(f'\n** export fitting results complete**\n')
+            else:
+                plqy_dic = None
+                optical_property = None
 
-            ## Plot fitting data
-            u.plot_peak_fit(x, y, f_fit, popt, peak=p, fill_between=True)
-            print(f'\n** plot fitting results complete**\n')
-            if stream_name == 'primary':
-                good_data.append(data_id)
+            ## Save fitting data
+            print(f'\nFitting function: {f_fit}\n')
+            ff={'fit_function': f_fit, 'curve_fit': popt}
+            de.dic_to_csv_for_stream(saving_path, qepro_dic, metadata_dic, stream_name=stream_name, fitting=ff, plqy_dic=plqy_dic)
+            print(f'\n** export fitting results complete**\n')
 
-        elif peak==[] and prop==[]:
+            ## Plot fitting data
+            u.plot_peak_fit(x, y, f_fit, popt, peak=p, fill_between=True)
+            print(f'\n** plot fitting results complete**\n')
+            print(f'{peak = }')
+            print(f'{prop = }')
+
+            if stream_name == 'primary':
+                good_data.append(data_id)
+
+        elif (type(peak) == list) and (prop == []):
             bad_data.append(data_id)
             print(f"\n*** No need to carry out fitting for {stream_name} in uid: {uid[:8]} ***\n")
             print(f"\n*** since {stream_name} in uid: {uid[:8]} is a bad data.***\n")
diff --git a/scripts/kafka_consumer_publisher.py b/scripts/kafka_consumer_publisher.py
index c900687..668c761 100644
--- a/scripts/kafka_consumer_publisher.py
+++ b/scripts/kafka_consumer_publisher.py
@@ -75,7 +75,7 @@
 # agent_data_path = '/home/xf28id2/data_halide'
 # agent_data_path = '/home/xf28id2/data_post_dilute_66mM'
 # agent_data_path = '/home/xf28id2/data_post_dilute_33mM'
-agent_data_path = '/home/xf28id2/Documents/ChengHung/20240510_kafka_pdffit'
+agent_data_path = '/home/xf28id2/Documents/ChengHung/pdffit2_test'
 write_agent_data = True
 
 # rate_label = ['infusion_rate_CsPb', 'infusion_rate_Br', 'infusion_rate_Cl', 'infusion_rate_I2']
@@ -198,10 +198,10 @@ def print_message(consumer, doctype, doc,
             ## obtain phase fraction & particle size from g(r)
             if 'scattering' in stream_list:
                 if fitting_pdf:
-                    phase_fraction, particel_size = pc._pdffit2_CsPbX3(gr_data, cif_list, qmax=20, qdamp=0.031, qbroad=0.032, fix_APD=False)
+                    phase_fraction, particel_size = pc._pdffit2_CsPbX3(gr_data, cif_list, qmax=20, qdamp=0.031, qbroad=0.032, fix_APD=False, toler=0.001)
                     pdf_property={'Br_ratio': phase_fraction[0], 'Br_size':particel_size[0]}
                 else:
-                    pdf_property={'Br_ratio': None, 'Br_size':None}
+                    pdf_property={'Br_ratio': np.nan, 'Br_size': np.nan}
                 ## remove 'scattering' from stream_list to avoid redundant work in next for loop
                 stream_list.remove('scattering')
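Note (illustrative, not part of the patch): swapping None for np.nan in pdf_property keeps the placeholder values numeric, so downstream array math and nan-aware reductions keep working instead of failing on object dtypes. A self-contained illustration with made-up numbers:

    # Illustration only -- the values are made up.
    import numpy as np

    with_nan  = np.array([0.82, np.nan, 0.79])   # stays float64
    with_none = np.array([0.82, None, 0.79])     # becomes dtype=object

    print(np.nanmean(with_nan))                  # 0.805 -- the missing entry is skipped
    # np.nanmean(with_none) would raise TypeError, since isnan is undefined for objects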
diff --git a/scripts/prepare_agent_pdf.py b/scripts/prepare_agent_pdf.py
index 2d15b12..2968a3d 100644
--- a/scripts/prepare_agent_pdf.py
+++ b/scripts/prepare_agent_pdf.py
@@ -14,7 +14,7 @@ def build_agen_Cl(peak_target=660, peak_tolerance=5, size_target=6, ):
 
     # data_path = '/home/xf28id2/data_ZnCl2'
     #data_path = '/home/xf28id2/data'
-    agent_data_path = '/home/xf28id2/Documents/ChengHung'
+    agent_data_path = '/home/xf28id2/Documents/ChengHung/pdffit2_test'
 
     dofs = [
@@ -36,8 +36,8 @@ def build_agen_Cl(peak_target=660, peak_tolerance=5, size_target=6, ):
         Objective(description="Peak emission", name="Peak", target=(peak_down, peak_up), weight=100, max_noise=0.25),
         Objective(description="Peak width", name="FWHM", target="min", transform="log", weight=5., max_noise=0.25),
         Objective(description="Quantum yield", name="PLQY", target="max", transform="log", weight=1., max_noise=0.25),
-        Objective(description="Particle size", name="size_nm", target=(size_target-1.5, size_target+1.5), transform="log", weight=0.1, max_noise=0.25),
-        Objective(description="Phase ratio", name="reduced_ratio", target=(ratio_down, ratio_up), transform="log", weight=0.1, max_noise=0.25),
+        Objective(description="Particle size", name="Br_size", target=(size_target-1.5, size_target+1.5), transform="log", weight=0.1, max_noise=0.25),
+        Objective(description="Phase ratio", name="Br_ratio", target=(ratio_down, ratio_up), transform="log", weight=0.1, max_noise=0.25),
     ]
@@ -69,23 +69,23 @@ def build_agen_Cl(peak_target=660, peak_tolerance=5, size_target=6, ):
 
     metadata_keys = ["time", "uid", "r_2"]
 
-    # if os.path.exists(init_file):
-    #     agent.load_data(init_file)
-    # else:
-    #     filepaths = glob.glob(f"{agent_data_path}/*.json")
-    #     filepaths.sort()
+
+    filepaths = glob.glob(f"{agent_data_path}/*.json")
+    filepaths.sort()
 
-    fn = agent_data_path + '/' + 'agent_data_update_quinine_CsPbCl3.csv'
-    df = pd.read_csv(fn, sep=',', index_col=False)
+    # fn = agent_data_path + '/' + 'agent_data_update_quinine_CsPbCl3.csv'
+    # df = pd.read_csv(fn, sep=',', index_col=False)
 
-    for i in range(len(df['uid'])):
-        # with open(fp, "r") as f:
-        #     data = json.load(f)
-        data = {}
-        for key in df.keys():
-            data[key] = df[key][i]
+    # for i in range(len(df['uid'])):
+    for fp in tqdm(filepaths):
+        with open(fp, "r") as f:
+            data = json.load(f)
+        # print(data)
+        # data = {}
+        # for key in df.keys():
+        #     data[key] = df[key][i]
 
         r_2_min = 0.05
         try:
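Note (illustrative, not part of the patch): build_agen_Cl now gathers per-run JSON files with glob instead of reading one aggregate CSV. A self-contained sketch of that loading pattern follows; the directory, the r_2 filtering, and the records list are illustrative assumptions rather than the beamline's actual schema:

    # Sketch only -- path and field handling are placeholders.
    import glob
    import json
    from tqdm import tqdm

    filepaths = sorted(glob.glob('/path/to/agent_data/*.json'))
    records = []
    for fp in tqdm(filepaths):
        with open(fp, 'r') as f:
            data = json.load(f)
        # keep runs whose fit quality clears a threshold, mirroring the r_2_min check above
        if data.get('r_2', 0.0) >= 0.05:
            records.append(data)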