
Commit

Update workflow for XPD in situ
XPD_Operator committed Jul 10, 2024
1 parent 909ee00 commit 959be3b
Showing 4 changed files with 86 additions and 35 deletions.
26 changes: 26 additions & 0 deletions scripts/_data_export.py
@@ -5,6 +5,7 @@
import pandas as pd
import os
from _data_analysis import *
import json



@@ -27,6 +28,31 @@ def _read_input_xlsx(fn, sheet_name='inputs', skiprows=1, header=None, index_col
return input_dic




## Dump glbl["_dark_dict_list"] to a json file, or load it back from one
def _dark_json(_dark_dict_list, json_fn, dump_or_load='dump'):
if dump_or_load == 'dump':
with open(json_fn, 'w') as f:
# indent=2 is not needed but makes the file human-readable
# if the data is nested
json.dump(_dark_dict_list, f, indent=2)

print(f'Dump dark_dict_list to {json_fn}')

elif dump_or_load == 'load':
with open(json_fn, 'r') as f:
new_dark = json.load(f)
for dark in new_dark:
_dark_dict_list.append(dark)

print(f'Append dark list from {os.path.basename(json_fn)} to _dark_dict_list')
# return _dark_dict_list
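
As a usage note: a minimal round-trip sketch of the helper added above (the file name is hypothetical; at the beamline the list would normally be xpdacq's glbl["_dark_dict_list"]):

dark_list = [{'uid': 'abc123', 'exposure': 0.2, 'timestamp': 1720000000.0}]

# Write the list to disk ...
_dark_json(dark_list, 'dark_backup.json', dump_or_load='dump')

# ... and append its contents back into an (empty) list.
restored = []
_dark_json(restored, 'dark_backup.json', dump_or_load='load')
assert restored == dark_list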





## Create file name based on date, time, uid, sample_name/type from fluorescence stream
def _fn_generator(uid, beamline_acronym='xpd'):
_, metadata_dic = read_qepro_by_stream(uid, stream_name='fluorescence', data_agent='tiled', beamline_acronym=beamline_acronym)
33 changes: 24 additions & 9 deletions scripts/_synthesis_queue_RM.py
@@ -25,6 +25,7 @@ def synthesis_queue(
num_abs=5,
num_flu=5,
det1_time=15,
det1_frame_rate=0.2,
pos='back',
dummy_qserver=False,
is_iteration=False,
@@ -129,6 +130,15 @@

restplan = BPlan('sleep_sec_q', 30)
RM.item_add(restplan, pos=pos)



## 4.0 Configure area detector in Qserver
scanplan = BPlan('configure_area_det',
det='pe1c',
exposure=1,
acq_time=det1_frame_rate)
RM.item_add(scanplan, pos=pos)
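
For orientation: in xpdacq-style configure_area_det plans, the exposure is delivered as a number of detector frames of length acq_time, so the warm-up above amounts to roughly five 0.2 s frames. A sketch of that arithmetic (assuming the usual ceil-based frame count; not verified against this queueserver plan's source):

import math

def frames_for(exposure, acq_time):
    # Assumed xpdacq-like behavior: split the exposure into
    # ceil(exposure / acq_time) frames of acq_time seconds each.
    num_frames = math.ceil(exposure / acq_time)
    return num_frames, num_frames * acq_time

print(frames_for(1, 0.2))    # warm-up configuration above -> (5, 1.0)
print(frames_for(15, 0.2))   # det1_time configuration below -> (75, 15.0)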


## 4-1. Take a fluorescence peak to check reaction
@@ -158,25 +168,30 @@
RM.item_add(restplan, pos=pos)


## 6.0 Set global parameters in Qserver
scanplan = BPlan('set_glbl_qserver',
frame_acq_time=0.5,
dk_window=1000,
auto_load_calib=True)
## 6.0 Print global parameters in Qserver
scanplan = BPlan('print_glbl_qserver')
RM.item_add(scanplan, pos=pos)


## 6.1 Configure area detector in Qserver
scanplan = BPlan('configure_area_det',
det='pe1c',
exposure=det1_time,
acq_time=det1_frame_rate)
RM.item_add(scanplan, pos=pos)


## 6. Start xray_uvvis bundle plan to take real data ('pe1c' or 'det')
scanplan = BPlan('xray_uvvis_plan', 'pe1c', 'qepro',
scanplan = BPlan('xray_uvvis_plan2', 'pe1c', 'qepro',
num_abs=num_abs,
num_flu=num_flu,
det1_time=det1_time,
num_flu=num_flu,
sample_type=sample[i],
spectrum_type='Absorbtion',
correction_type='Reference',
pump_list=pump_list,
precursor_list=precursor_list,
mixer=mixer)
mixer=mixer,
dilute_pump=pump_list[-1])
RM.item_add(scanplan, pos=pos)

## 6.1 sleep 20 seconds for stopping
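For reference, a minimal sketch of the client-side pattern these queue items use, with bluesky-queueserver-api (the ZMQ address is hypothetical, and the plan names must be registered in the queueserver's startup profile):

from bluesky_queueserver_api import BPlan
from bluesky_queueserver_api.zmq import REManagerAPI

RM = REManagerAPI(zmq_control_addr='tcp://localhost:60615')

# Items run in queue order; pos='back' appends, as in synthesis_queue above.
RM.item_add(BPlan('configure_area_det', det='pe1c',
                  exposure=1, acq_time=0.2), pos='back')
RM.item_add(BPlan('sleep_sec_q', 30), pos='back')
RM.queue_start()
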
54 changes: 29 additions & 25 deletions scripts/kafka_consumer_iterate_XPD_RM.py
@@ -90,7 +90,8 @@
name_by_prefix=bool(prefix[0]),
num_abs=num_uvvis[0],
num_flu=num_uvvis[1],
det1_time=num_uvvis[2],
det1_time=num_uvvis[2],
det1_frame_rate=num_uvvis[3],
zmq_control_addr=zmq_control_addr,
zmq_info_addr=zmq_info_addr,
)
@@ -111,26 +112,26 @@
write_agent_data = True
agent_data_path = '/home/xf28id2/Documents/ChengHung/202405_halide_data/20240702_Br'

USE_AGENT_iterate = False
USE_AGENT_iterate = True
peak_target = 515
if USE_AGENT_iterate:
import torch
from prepare_agent_pdf import build_agen
agent = build_agen(peak_target=peak_target, agent_data_path=agent_data_path)

iq_to_gr = False
iq_to_gr = True
if iq_to_gr:
from diffpy.pdfgetx import PDFConfig
global gr_path, cfg_fn, iq_fn, bkg_fn
gr_path = '/home/xf28id2/Documents/ChengHung/pdfstream_test/'
cfg_fn = '/home/xf28id2/Documents/ChengHung/pdfstream_test/pdfgetx3.cfg'

# ### CsPbBr2 test
# iq_fn = glob.glob(os.path.join(gr_path, '**CsPbBr2**.chi'))
# bkg_fn = glob.glob(os.path.join(gr_path, '**Tol_Olm_bkg**.chi'))

bkg_fn = ['/nsls2/data/xpd-new/legacy/processed/xpdUser/tiff_base/Toluene_OleAcid_mask/integration/Toluene_OleAcid_mask_20240602-122852_c49480_primary-1_mean_q.chi']

### CsPbBr2 test
iq_fn = glob.glob(os.path.join(gr_path, '**CsPbBr2**.chi'))[0]
cfg_fn = glob.glob(os.path.join(gr_path, '**CsPbBr2**.cfg'))[0]
bkg_fn = glob.glob(os.path.join(gr_path, '**Tol_Olm_bkg**.chi'))

search_and_match = False
if search_and_match:
from updated_pipeline_pdffit2 import Refinery
@@ -150,7 +151,7 @@
if use_sandbox:
sandbox_tiled_client = from_uri("https://tiled.nsls2.bnl.gov/api/v1/metadata/xpd/sandbox")

write_to_sandbox = False
write_to_sandbox = True
if write_to_sandbox:
sandbox_tiled_client = from_uri("https://tiled.nsls2.bnl.gov/api/v1/metadata/xpd/sandbox")

@@ -336,9 +337,9 @@ def print_message(consumer, doctype, doc,
iq_df2['q'] = iq_Q
iq_df2['I(q)'] = iq_I

# ### CsPbBr2 test
# iq_df = pd.read_csv(iq_fn[-1], skiprows=1, names=['q', 'I(q)'], sep=' ').to_numpy().T
# iq_df2 = pd.read_csv(iq_fn[-1], skiprows=1, names=['q', 'I(q)'], sep=' ')
### CsPbBr2 test
iq_df = pd.read_csv(iq_fn, skiprows=1, names=['q', 'I(q)'], sep=' ').to_numpy().T
iq_df2 = pd.read_csv(iq_fn, skiprows=1, names=['q', 'I(q)'], sep=' ')

else:
pass
@@ -349,8 +350,8 @@ def print_message(consumer, doctype, doc,
fn_uid = de._fn_generator(uid, beamline_acronym=beamline_acronym_01)
gr_fn = f'{fn_uid}_scattering.gr'

# ### CsPbBr2 test
# gr_fn = f'{iq_fn[:-4]}.gr'
### CsPbBr2 test
gr_fn = f'{iq_fn[:-4]}.gr'

# Build pdf config file from a scratch
pdfconfig = PDFConfig()
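
For context, a sketch of the standard diffpy.pdfgetx flow this block follows (diffpy.pdfgetx is a licensed package; the composition value is a placeholder and the calls are not verified against this script's exact settings):

from diffpy.pdfgetx import PDFConfig, PDFGetter

pdfconfig = PDFConfig()
pdfconfig.readConfig(cfg_fn)          # start from the pdfgetx3.cfg defaults
pdfconfig.composition = 'CsPbBr3'     # placeholder composition
pdfgetter = PDFGetter(config=pdfconfig)
r, g = pdfgetter(iq_df[0], iq_df[1])  # transform q, I(q) into r, G(r)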
@@ -396,12 +397,12 @@ def print_message(consumer, doctype, doc,

gr_fit_df = pd.DataFrame()
if fitting_pdf:
# ### CsPbBr2 test
# gr_data = '/home/xf28id2/Documents/ChengHung/pdffit2_example/CsPbBr3/CsPbBr3.gr'
### CsPbBr2 test
gr_data = '/home/xf28id2/Documents/ChengHung/pdffit2_example/CsPbBr3/CsPbBr3.gr'

gr_df = pd.read_csv(gr_data, names=['r', 'g(r)'], sep =' ')
pf = pc._pdffit2_CsPbX3(gr_data, cif_list, rmax=100, qmax=14, qdamp=0.031, qbroad=0.032,
fix_APD=True, toler=0.001, return_pf=True)
fix_APD=True, toler=0.01, return_pf=True)
phase_fraction = pf.phase_fractions()['mass']
particel_size = []
for i in range(pf.num_phases()):
@@ -591,8 +592,8 @@ def print_message(consumer, doctype, doc,
res_values = []
for i in new_points_label:
if i in new_points['points'].keys():
res_values.append(new_points['points'][i])
x_tensor = torch.tensor(res_value)
res_values.append(new_points['points'][i][0])
x_tensor = torch.tensor(res_values)
post = agent.posterior(x_tensor)
post_mean = post.mean.tolist()[0]
post_stddev = post.stddev.tolist()[0]
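
The corrected lines above assume each entry of new_points['points'] is a one-element list, and they also fix the res_value/res_values typo; a toy illustration (labels and values hypothetical):

import torch

new_points = {'points': {'infusion_rate_1': [80.0], 'infusion_rate_2': [105.0]}}
new_points_label = ['infusion_rate_1', 'infusion_rate_2']

# [0] unwraps each one-element list before the tensor is built.
res_values = [new_points['points'][i][0]
              for i in new_points_label if i in new_points['points']]
x_tensor = torch.tensor(res_values)   # tensor([ 80., 105.])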
@@ -748,10 +749,11 @@ def print_message(consumer, doctype, doc,
# rate_list = np.asarray(rate_list)
rate_list = []
for i in new_points_label:
for key in new_points['points']:
if i == key:
rate_list.append(new_points['points'][key][0])
rate_list.insert(2, sum(rate_list)/10)
if i in new_points['points'].keys():
rate_list.append(new_points['points'][i][0])
else:
rate_list.append(0)
# rate_list.insert(2, sum(rate_list)/10)
rate_list.append(sum(rate_list)*5)
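
Numerically, this replaces the old step of inserting the average (sum/10) at index 2 with appending five times the running sum at the end, which lines up with the dilute_pump=pump_list[-1] argument added in _synthesis_queue_RM.py; a toy comparison (rates hypothetical):

rates = [10.0, 20.0, 30.0]

old = rates.copy()
old.insert(2, sum(old) / 10)   # old behavior -> [10.0, 20.0, 6.0, 30.0]

new = rates.copy()
new.append(sum(new) * 5)       # new behavior -> [10.0, 20.0, 30.0, 300.0]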

else:
@@ -776,13 +778,15 @@
wash_tube=wash_tube,
name_by_prefix=bool(prefix[0]),
num_abs=num_uvvis[0],
num_flu=num_uvvis[1],
num_flu=num_uvvis[1],
det1_time=num_uvvis[2],
det1_frame_rate=num_uvvis[3],
is_iteration=True,
zmq_control_addr=zmq_control_addr,
zmq_info_addr=zmq_info_addr,
)

RM.queue_start()
# RM.queue_start()

# elif use_good_bad:
else:
8 changes: 7 additions & 1 deletion scripts/notes/LDRD20-31_workflow_XPD.ipynb
@@ -116,7 +116,13 @@
"source": [
"~$ conda activate 2024-1.0-py310-tiled-blop\n",
"\n",
"~$ python kafka_consumer_iterate_RM.py xpd xpd-ldrd20-31"
"~$ python kafka_consumer_iterate_RM.py xpd xpd-ldrd20-31\n",
"\n",
"\n",
"## Try to write processed data into Sandbox\n",
"~$ conda activate 2024-2.2-py310-tiled\n",
"\n",
"~$ PYTHONPATH=$BS_PYTHONPATH_310 python kafka_consumer_iterate_XPD_RM.py xpd xpd-ldrd20-31"
]
},
{
