
Commit bab2710
update from lob1-xpd-ws1
cheng-hung committed May 16, 2024
2 parents f9a77b2 + 125ec0c
Showing 5 changed files with 43 additions and 40 deletions.
4 changes: 2 additions & 2 deletions scripts/_pdf_calculator.py
@@ -20,7 +20,7 @@ def _no_oxidation_cif(cif_file):



-def _pdffit2_CsPbX3(gr_data, cif_list, qmax=18, qdamp=0.031, qbroad=0.032, fix_APD=True):
+def _pdffit2_CsPbX3(gr_data, cif_list, qmax=18, qdamp=0.031, qbroad=0.032, fix_APD=True, toler=0.000001):

# Initialize the CifParser with the path to your .cif file
# Parse the .cif file
@@ -58,7 +58,7 @@ def _pdffit2_CsPbX3(gr_data, cif_list, qmax=18, qdamp=0.031, qbroad=0.032, fix_A

# Refine
pf.pdfrange(1, 2.5, 60)
-pf.refine()
+pf.refine(toler=toler)


phase_fraction = pf.phase_fractions()['mass']
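For reference, a hedged usage sketch of the new `toler` keyword, mirroring the call added to kafka_consumer_iterate_RM.py in this same commit. The import line and the upstream `gr_data`/`cif_list` objects are assumptions, not part of the diff.

```python
# Sketch only: mirrors the consumer-side call in kafka_consumer_iterate_RM.py.
# `gr_data` (reduced G(r) data) and `cif_list` (candidate .cif files) are
# assumed to be prepared upstream; the import alias `pc` is assumed from the
# consumers' usage.
import _pdf_calculator as pc

phase_fraction, particle_size = pc._pdffit2_CsPbX3(
    gr_data,
    cif_list,
    qmax=20, qdamp=0.031, qbroad=0.032,
    fix_APD=False,
    toler=0.001,  # looser than the new 1e-6 default, so the refine converges faster
)
pdf_property = {'Br_ratio': phase_fraction[0], 'Br_size': particle_size[0]}
```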
2 changes: 1 addition & 1 deletion scripts/_synthesis_queue_RM.py
@@ -126,7 +126,7 @@ def synthesis_queue(
flowplan = BPlan('start_group_infuse', [pump_list[-1]], [rate_list[i][-1]])
RM.item_add(flowplan, pos=pos)

-restplan = BPlan('sleep_sec_q', 60)
+restplan = BPlan('sleep_sec_q', 30)
RM.item_add(restplan, pos=pos)


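For readers less familiar with the queueing calls in this hunk, a minimal sketch of how such a rest step is submitted through bluesky-queueserver. The 'sleep_sec_q' plan name comes from the diff and is assumed to be registered on the queue server; the connection uses library defaults.

```python
# Minimal sketch, assuming a running queue server with 'sleep_sec_q' registered
# (as used in _synthesis_queue_RM.py).
from bluesky_queueserver_api import BPlan
from bluesky_queueserver_api.zmq import REManagerAPI

RM = REManagerAPI()                  # default ZMQ endpoints on localhost
restplan = BPlan('sleep_sec_q', 30)  # 30 s rest between infusion steps
RM.item_add(restplan, pos='back')    # append to the end of the queue
```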
39 changes: 21 additions & 18 deletions scripts/kafka_consumer_iterate_RM.py
@@ -135,7 +135,7 @@
agent = build_agen2(peak_target=peak_target)


-fitting_pdf = True
+fitting_pdf = False
if fitting_pdf:
pdf_cif_dir = '/home/xf28id2/Documents/ChengHung/pdffit2_example/CsPbBr3/'
cif_list = [os.path.join(pdf_cif_dir, 'CsPbBr3_Orthorhombic.cif')]
@@ -253,10 +253,10 @@ def print_message(consumer, doctype, doc,
## obtain phase fraction & particle size from g(r)
if 'scattering' in stream_list:
if fitting_pdf:
-phase_fraction, particel_size = pc._pdffit2_CsPbX3(gr_data, cif_list, qmax=20, qdamp=0.031, qbroad=0.032)
+phase_fraction, particel_size = pc._pdffit2_CsPbX3(gr_data, cif_list, qmax=20, qdamp=0.031, qbroad=0.032, fix_APD=False, toler=0.001)
pdf_property={'Br_ratio': phase_fraction[0], 'Br_size':particel_size[0]}
else:
-pdf_property={'Br_ratio': None, 'Br_size':None}
+pdf_property={'Br_ratio': np.nan, 'Br_size': np.nan}
## remove 'scattering' from stream_list to avoid redundant work in next for loop
stream_list.remove('scattering')

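One practical effect of switching the placeholders from None to np.nan, sketched below under the assumption that the agent data eventually lands in a pandas DataFrame (as the CSV/JSON handling elsewhere in these scripts suggests):

```python
# Sketch: np.nan keeps 'Br_ratio'/'Br_size' numeric downstream, whereas None
# would push pandas to an object-dtype column.
import numpy as np
import pandas as pd

pdf_property = {'Br_ratio': np.nan, 'Br_size': np.nan}
df = pd.DataFrame([pdf_property])
print(df.dtypes)                    # both columns come out as float64
print(df['Br_ratio'].isna().any())  # True, so missing PDF fits are easy to filter
```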
@@ -431,23 +431,26 @@ def print_message(consumer, doctype, doc,
agent_iteration.append(True)

# TODO: remove the fllowing 3 lines if no error reported
# else:
# plqy_dic = None
# optical_property = None

## Save fitting data
print(f'\nFitting function: {f_fit}\n')
ff={'fit_function': f_fit, 'curve_fit': popt}
de.dic_to_csv_for_stream(saving_path, qepro_dic, metadata_dic, stream_name=stream_name, fitting=ff, plqy_dic=plqy_dic)
print(f'\n** export fitting results complete**\n')
else:
plqy_dic = None
optical_property = None

## Plot fitting data
u.plot_peak_fit(x, y, f_fit, popt, peak=p, fill_between=True)
print(f'\n** plot fitting results complete**\n')
if stream_name == 'primary':
good_data.append(data_id)
## Save fitting data
print(f'\nFitting function: {f_fit}\n')
ff={'fit_function': f_fit, 'curve_fit': popt}
de.dic_to_csv_for_stream(saving_path, qepro_dic, metadata_dic, stream_name=stream_name, fitting=ff, plqy_dic=plqy_dic)
print(f'\n** export fitting results complete**\n')

elif peak==[] and prop==[]:
## Plot fitting data
u.plot_peak_fit(x, y, f_fit, popt, peak=p, fill_between=True)
print(f'\n** plot fitting results complete**\n')
print(f'{peak = }')
print(f'{prop = }')

if stream_name == 'primary':
good_data.append(data_id)

elif (type(peak) == list) and (prop == []):
bad_data.append(data_id)
print(f"\n*** No need to carry out fitting for {stream_name} in uid: {uid[:8]} ***\n")
print(f"\n*** since {stream_name} in uid: {uid[:8]} is a bad data.***\n")
6 changes: 3 additions & 3 deletions scripts/kafka_consumer_publisher.py
@@ -75,7 +75,7 @@
# agent_data_path = '/home/xf28id2/data_halide'
# agent_data_path = '/home/xf28id2/data_post_dilute_66mM'
# agent_data_path = '/home/xf28id2/data_post_dilute_33mM'
-agent_data_path = '/home/xf28id2/Documents/ChengHung/20240510_kafka_pdffit'
+agent_data_path = '/home/xf28id2/Documents/ChengHung/pdffit2_test'

write_agent_data = True
# rate_label = ['infusion_rate_CsPb', 'infusion_rate_Br', 'infusion_rate_Cl', 'infusion_rate_I2']
@@ -198,10 +198,10 @@ def print_message(consumer, doctype, doc,
## obtain phase fraction & particle size from g(r)
if 'scattering' in stream_list:
if fitting_pdf:
-phase_fraction, particel_size = pc._pdffit2_CsPbX3(gr_data, cif_list, qmax=20, qdamp=0.031, qbroad=0.032, fix_APD=False)
+phase_fraction, particel_size = pc._pdffit2_CsPbX3(gr_data, cif_list, qmax=20, qdamp=0.031, qbroad=0.032, fix_APD=False, toler=0.001)
pdf_property={'Br_ratio': phase_fraction[0], 'Br_size':particel_size[0]}
else:
-pdf_property={'Br_ratio': None, 'Br_size':None}
+pdf_property={'Br_ratio': np.nan, 'Br_size': np.nan}
## remove 'scattering' from stream_list to avoid redundant work in next for loop
stream_list.remove('scattering')

32 changes: 16 additions & 16 deletions scripts/prepare_agent_pdf.py
@@ -14,7 +14,7 @@
def build_agen_Cl(peak_target=660, peak_tolerance=5, size_target=6, ):
# data_path = '/home/xf28id2/data_ZnCl2'
#data_path = '/home/xf28id2/data'
-agent_data_path = '/home/xf28id2/Documents/ChengHung'
+agent_data_path = '/home/xf28id2/Documents/ChengHung/pdffit2_test'


dofs = [
@@ -36,8 +36,8 @@ def build_agen_Cl(peak_target=660, peak_tolerance=5, size_target=6, ):
Objective(description="Peak emission", name="Peak", target=(peak_down, peak_up), weight=100, max_noise=0.25),
Objective(description="Peak width", name="FWHM", target="min", transform="log", weight=5., max_noise=0.25),
Objective(description="Quantum yield", name="PLQY", target="max", transform="log", weight=1., max_noise=0.25),
Objective(description="Particle size", name="size_nm", target=(size_target-1.5, size_target+1.5), transform="log", weight=0.1, max_noise=0.25),
Objective(description="Phase ratio", name="reduced_ratio", target=(ratio_down, ratio_up), transform="log", weight=0.1, max_noise=0.25),
Objective(description="Particle size", name="Br_size", target=(size_target-1.5, size_target+1.5), transform="log", weight=0.1, max_noise=0.25),
Objective(description="Phase ratio", name="Br_ratio", target=(ratio_down, ratio_up), transform="log", weight=0.1, max_noise=0.25),
]


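The renamed objectives line up with the keys the Kafka consumers now write (`Br_ratio`, `Br_size` in `pdf_property` above). A small self-contained sketch of that naming check; the full set of emitted keys beyond those visible in this diff is an assumption.

```python
# Sketch: the agent's objective names must match the keys the consumers emit,
# otherwise the agent never sees that data. 'Peak', 'FWHM', 'PLQY' come from
# the other objectives in this file; the emitted-key set is assumed.
emitted_keys = {'Peak', 'FWHM', 'PLQY', 'Br_size', 'Br_ratio'}
objective_names = ['Peak', 'FWHM', 'PLQY', 'Br_size', 'Br_ratio']

missing = [name for name in objective_names if name not in emitted_keys]
assert not missing, f"objectives with no matching data key: {missing}"
```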
@@ -69,23 +69,23 @@ def build_agen_Cl(peak_target=660, peak_tolerance=5, size_target=6, ):

metadata_keys = ["time", "uid", "r_2"]

# if os.path.exists(init_file):
# agent.load_data(init_file)

# else:
# filepaths = glob.glob(f"{agent_data_path}/*.json")
# filepaths.sort()

filepaths = glob.glob(f"{agent_data_path}/*.json")
filepaths.sort()

fn = agent_data_path + '/' + 'agent_data_update_quinine_CsPbCl3.csv'
df = pd.read_csv(fn, sep=',', index_col=False)
# fn = agent_data_path + '/' + 'agent_data_update_quinine_CsPbCl3.csv'
# df = pd.read_csv(fn, sep=',', index_col=False)


for i in range(len(df['uid'])):
# with open(fp, "r") as f:
# data = json.load(f)
data = {}
for key in df.keys():
data[key] = df[key][i]
# for i in range(len(df['uid'])):
for fp in tqdm(filepaths):
with open(fp, "r") as f:
data = json.load(f)
# print(data)
# data = {}
# for key in df.keys():
# data[key] = df[key][i]

r_2_min = 0.05
try:
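A self-contained sketch of the JSON-based loading path this hunk switches to. The glob pattern, tqdm loop, and the 0.05 r_2 cutoff are taken from the diff; the filtering direction and the per-file field access are assumptions about the surrounding code.

```python
# Sketch of the new JSON-based agent-data loading; only the directory, glob
# pattern, tqdm loop, and r_2_min value are taken from the diff.
import glob
import json

from tqdm import tqdm

agent_data_path = '/home/xf28id2/Documents/ChengHung/pdffit2_test'
filepaths = sorted(glob.glob(f"{agent_data_path}/*.json"))

r_2_min = 0.05
records = []
for fp in tqdm(filepaths):
    with open(fp, "r") as f:
        data = json.load(f)
    # Keep only runs whose spectral fit quality clears the cutoff; a per-run
    # 'r_2' field is assumed to be present in each JSON file.
    if data.get("r_2", 0.0) >= r_2_min:
        records.append(data)
```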
