Revise for XPD beamline acquisition
XPD_Operator committed Jun 29, 2024
1 parent 661d418 commit d4d57d3
Showing 2 changed files with 70 additions and 50 deletions.
44 changes: 22 additions & 22 deletions scripts/_synthesis_queue_RM.py
@@ -127,18 +127,18 @@ def synthesis_queue(
flowplan = BPlan('start_group_infuse', [pump_list[-1]], [rate_list[i][-1]])
RM.item_add(flowplan, pos=pos)

-restplan = BPlan('sleep_sec_q', 30)
+restplan = BPlan('sleep_sec_q', 5)
RM.item_add(restplan, pos=pos)


-# ## 4-1. Take a fluorescence peak to check reaction
-# scanplan = BPlan('take_a_uvvis_csv_q', sample_type=sample[i],
-# spectrum_type='Corrected Sample',
-# correction_type='Dark',
-# pump_list=pump_list,
-# precursor_list=precursor_list,
-# mixer=mixer)
-# RM.item_add(scanplan, pos=pos)
+## 4-1. Take a fluorescence peak to check reaction
+scanplan = BPlan('take_a_uvvis_csv_q', sample_type=sample[i],
+spectrum_type='Corrected Sample',
+correction_type='Dark',
+pump_list=pump_list,
+precursor_list=precursor_list,
+mixer=mixer)
+RM.item_add(scanplan, pos=pos)


# ## 4-2. Take a Absorption spectra to check reaction
@@ -158,21 +158,21 @@ def synthesis_queue(
RM.item_add(restplan, pos=pos)


-# ## 6. Start xray_uvvis bundle plan to take real data ('pe1c' or 'det')
-# scanplan = BPlan('xray_uvvis_plan', 'det', 'qepro',
-# num_abs=num_abs,
-# num_flu=num_flu,
-# det1_time=det1_time,
-# sample_type=sample[i],
-# spectrum_type='Absorbtion',
-# correction_type='Reference',
-# pump_list=pump_list,
-# precursor_list=precursor_list,
-# mixer=mixer)
-# RM.item_add(scanplan, pos=pos)
+## 6. Start xray_uvvis bundle plan to take real data ('pe1c' or 'det')
+scanplan = BPlan('xray_uvvis_plan', 'pe1c', 'qepro',
+num_abs=num_abs,
+num_flu=num_flu,
+det1_time=det1_time,
+sample_type=sample[i],
+spectrum_type='Absorbtion',
+correction_type='Reference',
+pump_list=pump_list,
+precursor_list=precursor_list,
+mixer=mixer)
+RM.item_add(scanplan, pos=pos)

## 6.1 sleep 20 seconds for stopping
-restplan = BPlan('sleep_sec_q', 20)
+restplan = BPlan('sleep_sec_q', 5)
RM.item_add(restplan, pos=pos)


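Note on the pattern in this file: every step of the synthesis queue is built as a BPlan item and pushed to the run engine manager with RM.item_add, following bluesky-queueserver-api. The sketch below shows that pattern in isolation; it assumes a queueserver is reachable at the default ZMQ address, that the custom plans ('sleep_sec_q', 'take_a_uvvis_csv_q') are loaded in the worker's startup profile, and the arguments are placeholders.

# Minimal sketch of the queue-submission pattern used in _synthesis_queue_RM.py.
# Assumptions: a bluesky-queueserver is running at the default ZMQ address and
# the custom plans below exist in its startup profile; arguments are placeholders.
from bluesky_queueserver_api import BPlan
from bluesky_queueserver_api.zmq import REManagerAPI

RM = REManagerAPI()                    # synchronous client for the RE manager

restplan = BPlan('sleep_sec_q', 5)     # short pause between queued plans
RM.item_add(restplan, pos=0)           # pos can be an index, 'front', or 'back'

scanplan = BPlan('take_a_uvvis_csv_q',
                 sample_type='CsPbBr3_test',
                 spectrum_type='Corrected Sample',
                 correction_type='Dark',
                 pump_list=[], precursor_list=[], mixer=[])
RM.item_add(scanplan, pos=1)

print(RM.status()['items_in_queue'])   # confirm the items were queued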
76 changes: 48 additions & 28 deletions scripts/kafka_consumer_iterate_XPD_RM.py
@@ -109,7 +109,7 @@
use_good_bad = True
post_dilute = True
write_agent_data = True
-agent_data_path = '/home/xf28id2/Documents/ChengHung/202405_halide_data/with_xray'
+agent_data_path = '/home/xf28id2/Documents/ChengHung/202405_halide_data/test'

USE_AGENT_iterate = False
peak_target = 515
@@ -124,9 +124,12 @@
global gr_path, cfg_fn, iq_fn, bkg_fn
gr_path = '/home/xf28id2/Documents/ChengHung/pdfstream_test/'
cfg_fn = '/home/xf28id2/Documents/ChengHung/pdfstream_test/pdfgetx3.cfg'
-iq_fn = glob.glob(os.path.join(gr_path, '**CsPb**.chi'))
-# bkg_fn = glob.glob(os.path.join(gr_path, '**bkg**.chi'))
-bkg_fn = ['/nsls2/data/xpd-new/legacy/processed/xpdUser/tiff_base/Toluene_OleAcid_mask/integration/Toluene_OleAcid_mask_20240602-122852_c49480_primary-1_mean_q.chi']
+
+### CsPbBr2 test
+iq_fn = glob.glob(os.path.join(gr_path, '**CsPbBr2**.chi'))
+bkg_fn = glob.glob(os.path.join(gr_path, '**Tol_Olm_bkg**.chi'))
+
+# bkg_fn = ['/nsls2/data/xpd-new/legacy/processed/xpdUser/tiff_base/Toluene_OleAcid_mask/integration/Toluene_OleAcid_mask_20240602-122852_c49480_primary-1_mean_q.chi']

search_and_match = True
if search_and_match:
@@ -135,7 +138,7 @@
# mystery_path = "'/home/xf28id2/Documents/ChengHung/pdfstream_test/gr"
results_path = "/home/xf28id2/Documents/ChengHung/pdffit2_example/results_CsPbBr_chemsys_search"

-fitting_pdf = False
+fitting_pdf = True
if fitting_pdf:
global pdf_cif_dir, cif_list, gr_data
pdf_cif_dir = '/home/xf28id2/Documents/ChengHung/pdffit2_example/CsPbBr3/'
@@ -147,7 +150,7 @@
if use_sandbox:
sandbox_tiled_client = from_uri("https://tiled.nsls2.bnl.gov/api/v1/metadata/xpd/sandbox")

-write_to_sandbox = False
+write_to_sandbox = True
if write_to_sandbox:
sandbox_tiled_client = from_uri("https://tiled.nsls2.bnl.gov/api/v1/metadata/xpd/sandbox")

@@ -172,6 +175,11 @@ def print_kafka_messages(beamline_acronym_01, beamline_acronym_02, csv_path=csv_
f'{USE_AGENT_iterate = }\n'
f'{peak_target = }\n'
f'{write_agent_data = }\n'
+f'{iq_to_gr = }\n'
+f'{search_and_match = }\n'
+f'{fitting_pdf = }\n'
+f'{use_sandbox = }\n'
+f'{write_to_sandbox = }\n'
f'{zmq_control_addr = }')


@@ -323,22 +331,27 @@ def print_message(consumer, doctype, doc,
u = plot_uvvis(qepro_dic, metadata_dic)

if use_sandbox:
-# iq_df = pd.DataFrame()
-# iq_df['chi_Q'] = iq_Q
-# iq_df['chi_I'] = iq_I
-iq_df = np.asarray([iq_Q, iq_I])
-# iq_df = iq_fn[0]
-iq_df2 = pd.DataFrame()
-iq_df2['q'] = iq_Q
-iq_df2['I(q)'] = iq_I
+# iq_df = np.asarray([iq_Q, iq_I])
+# iq_df2 = pd.DataFrame()
+# iq_df2['q'] = iq_Q
+# iq_df2['I(q)'] = iq_I
+
+### CsPbBr2 test
+iq_df = pd.read_csv(iq_fn[-1], skiprows=1, names=['q', 'I(q)'], sep=' ').to_numpy().T
+iq_df2 = pd.read_csv(iq_fn[-1], skiprows=1, names=['q', 'I(q)'], sep=' ')

else:
pass
# iq_df = iq_fn[0]

if iq_to_gr:
# Grab metadat from stream_name = fluorescence for naming gr file
fn_uid = de._fn_generator(uid, beamline_acronym=beamline_acronym_01)
-gr_fn = f'{fn_uid}_scattering.gr'
+# gr_fn = f'{fn_uid}_scattering.gr'
+
+### CsPbBr2 test
+gr_fn = f'{iq_fn[:-4]}.gr'

# Build pdf config file from a scratch
pdfconfig = PDFConfig()
pdfconfig.readConfig(cfg_fn)
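For reference, the pdfconfig built here feeds the standard diffpy.pdfgetx reduction from I(Q) to G(r). The sketch below shows that workflow with dummy data; the composition/qmax overrides and the assumption that the cfg file uses a Q-based dataformat are illustrative only, and the exact PDFGetter call convention should be checked against the installed pdfgetx version.

# Sketch of the I(Q) -> G(r) reduction behind the pdfconfig above.
# Assumptions: diffpy.pdfgetx is installed, pdfgetx3.cfg sets a Q-based
# dataformat, and the composition/qmax values are placeholders, not beamline settings.
import numpy as np
from diffpy.pdfgetx import PDFConfig, PDFGetter

pdfconfig = PDFConfig()
pdfconfig.readConfig('pdfgetx3.cfg')      # same pattern as cfg_fn above
pdfconfig.composition = 'CsPbBr3'         # placeholder composition
pdfconfig.qmax = 14                       # placeholder Fourier cutoff

q = np.linspace(0.5, 14, 2000)            # dummy Q grid, 1/angstrom
iq = np.exp(-0.2 * q) + 0.01              # dummy intensities
pdfgetter = PDFGetter(config=pdfconfig)
r, gr = pdfgetter(q, iq)                  # reduced pair-distribution function G(r)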
Expand Down Expand Up @@ -383,10 +396,12 @@ def print_message(consumer, doctype, doc,

gr_fit_df = pd.DataFrame()
if fitting_pdf:
-# gr_data = '/home/xf28id2/Documents/ChengHung/pdffit2_example/CsPbBr3/CsPbBr3.gr'
+### CsPbBr2 test
+gr_data = '/home/xf28id2/Documents/ChengHung/pdffit2_example/CsPbBr3/CsPbBr3.gr'
+
gr_df = pd.read_csv(gr_data, names=['r', 'g(r)'], sep =' ')
-pf = pc._pdffit2_CsPbX3(gr_data, cif_list, rmax=120, qmax=14, qdamp=0.031, qbroad=0.032,
-fix_APD=False, toler=0.001, return_pf=True)
+pf = pc._pdffit2_CsPbX3(gr_data, cif_list, rmax=100, qmax=14, qdamp=0.031, qbroad=0.032,
+fix_APD=True, toler=0.001, return_pf=True)
phase_fraction = pf.phase_fractions()['mass']
particel_size = []
for i in range(pf.num_phases()):
@@ -399,15 +414,16 @@ def print_message(consumer, doctype, doc,
pdf_property={'Br_ratio': phase_fraction[0], 'Br_size':particel_size[0]}
gr_fit = np.asarray([pf.getR(), pf.getpdf_fit()])
# gr_fit_df = pd.DataFrame()
-gr_fit_df['r'] = pf.getR()
-gr_fit_df['fit_g(g)'] = pf.getpdf_fit()
+gr_fit_df['fit_r'] = pf.getR()
+gr_fit_df['fit_g(r)'] = pf.getpdf_fit()
else:
gr_fit = None
-gr_fit_df['r'] = np.nan
-gr_fit_df['fit_g(g)'] = np.nan
+gr_fit_df['fit_r'] = np.nan
+gr_fit_df['fit_g(r)'] = np.nan
pdf_property={'Br_ratio': np.nan, 'Br_size': np.nan}

-u.plot_iq_to_gr(iq_df, gr_df.to_numpy().T, gr_fit=gr_fit)
+if iq_to_gr:
+    u.plot_iq_to_gr(iq_df, gr_df.to_numpy().T, gr_fit=gr_fit)
## remove 'scattering' from stream_list to avoid redundant work in next for loop
stream_list.remove('scattering')

@@ -434,7 +450,8 @@ def print_message(consumer, doctype, doc,
clear_fig=False
u.plot_data(clear_fig=clear_fig)
print(f'\n** Plot {stream_name} in uid: {uid[0:8]} complete **\n')


+global abs_array, abs_array_offset, x0, y0
## Idenfify good/bad data if it is a fluorescence scan in 'take_a_uvvis'
if qepro_dic['QEPro_spectrum_type'][0]==2 and stream_name=='take_a_uvvis':
print(f'\n*** start to identify good/bad data in stream: {stream_name} ***\n')
@@ -479,7 +496,8 @@ def print_message(consumer, doctype, doc,
# u.plot_average_good(x0, y0, color=cmap(color_idx[sub_idx]), label=label_uid)
# sub_idx = sample.index(metadata_dic['sample_type'])
u.plot_average_good(x0, y0, label=label_uid, clf_limit=9)


+global f_fit
## Skip peak fitting if qepro type is absorbance
if qepro_dic['QEPro_spectrum_type'][0] == 3:
print(f"\n*** No need to carry out fitting for {stream_name} in uid: {uid[:8]} ***\n")
@@ -645,8 +663,9 @@ def print_message(consumer, doctype, doc,
except (UnboundLocalError):
pass

+global sandbox_uid
## Save processed data in df and agent_data as metadta in sandbox
-if write_to_sandbox:
+if write_to_sandbox and (stream_name == 'fluorescence'):
df = pd.DataFrame()
df['wavelength_nm'] = x0
df['absorbance_mean'] = abs_array
@@ -661,11 +680,12 @@ def print_message(consumer, doctype, doc,
entry = sandbox_tiled_client.write_dataframe(df_new, metadata=agent_data)
# uri = sandbox_tiled_client.values()[-1].uri
uri = entry.uri
sandbox_uid = uri.split('/')[-1]
agent_data.update({'sandbox_uid': sandbox_uid})
print(f"\nwrote to Tiled sandbox uid: {sandbox_uid}")

## Save agent_data locally
-if write_agent_data:
+if write_agent_data and (stream_name == 'fluorescence'):
+    # agent_data.update({'sandbox_uid': sandbox_uid})
with open(f"{agent_data_path}/{data_id}.json", "w") as f:
json.dump(agent_data, f)
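The sandbox write in this hunk follows the Tiled client pattern; a minimal sketch with placeholder data and metadata is below, assuming the caller has write access to the xpd sandbox node.

# Sketch of the Tiled sandbox write performed above (placeholder data/metadata).
import pandas as pd
from tiled.client import from_uri

sandbox_tiled_client = from_uri("https://tiled.nsls2.bnl.gov/api/v1/metadata/xpd/sandbox")
df_new = pd.DataFrame({'wavelength_nm': [400.0, 401.0],
                       'absorbance_mean': [0.12, 0.13]})
agent_data = {'sample_type': 'CsPbBr3_test'}              # placeholder metadata
entry = sandbox_tiled_client.write_dataframe(df_new, metadata=agent_data)
sandbox_uid = entry.uri.split('/')[-1]                    # uid recorded back into agent_data
print(f"wrote to Tiled sandbox uid: {sandbox_uid}")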
@@ -693,7 +713,7 @@ def print_message(consumer, doctype, doc,
elif len(good_data) <= 2 and use_good_bad:
print('*** Add another fluorescence and absorption scan to the fron of qsever ***\n')

-restplan = BPlan('sleep_sec_q', 3)
+restplan = BPlan('sleep_sec_q', 5)
RM.item_add(restplan, pos=0)

scanplan = BPlan('take_a_uvvis_csv_q',
