diff --git a/.flake8 b/.flake8 index 763d84f..6eb631f 100644 --- a/.flake8 +++ b/.flake8 @@ -9,4 +9,4 @@ exclude = docs/source/conf.py max-line-length = 115 -# Ignore some style 'errors' produced while formatting by 'black' -ignore = E203, W503 +# Ignore 'black' formatting conflicts, plus pre-existing flake8 findings to be fixed later +ignore = E203, E402, E501, E712, E741, F402, F632, F811, F821, F823, F841, W503, W605 diff --git a/docs/source/conf.py b/docs/source/conf.py index 84ecb08..f897835 100644 --- a/docs/source/conf.py +++ b/docs/source/conf.py @@ -159,7 +159,7 @@ # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). +# author, documentclass [howto, manual, or own class]). latex_documents = [ ( master_doc, @@ -190,7 +190,7 @@ # Grouping the document tree into Texinfo files. List of tuples # (source start file, target name, title, author, -# dir menu entry, description, category) +# dir menu entry, description, category) texinfo_documents = [ ( master_doc, diff --git a/pyCHX/Badpixels.py b/pyCHX/Badpixels.py index 7b7dc5b..751e304 100644 --- a/pyCHX/Badpixels.py +++ b/pyCHX/Badpixels.py @@ -90,11 +90,11 @@ 4155535, ] ), # 57 points, coralpor - "6cc34a": np.array([1058942, 2105743, 2105744, 2107813, 2107815, 2109883, 4155535]), # coralpor + "6cc34a": np.array([1058942, 2105743, 2105744, 2107813, 2107815, 2109883, 4155535]), # coralpor } -## Create during 2018 Cycle 1 +# Created during 2018 Cycle 1 BadPix_4M = np.array( [ 828861, diff --git a/pyCHX/Compress_readerNew.py b/pyCHX/Compress_readerNew.py index 8d69158..a5312c1 100644 --- a/pyCHX/Compress_readerNew.py +++ b/pyCHX/Compress_readerNew.py @@ -82,7 +82,7 @@ def __init__(self, filename, mode="rb", nbytes=2): def rdframe(self, n): # read header then image - hdr = self._read_header(n) + self._read_header(n) pos, vals = self._read_raw(n) img = np.zeros((self._rows * self._cols,)) img[pos] = vals @@ -90,12 +90,12 @@ def rdframe(self, n): def rdrawframe(self, n): # read header then image - hdr = self._read_header(n) + self._read_header(n) return self._read_raw(n) def rdchunk(self): """read the next chunk""" - header = self._fd.read(1024) + self._fd.read(1024) def index(self): """Index the file by reading all frame_indexes.
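# The rdframe/rdrawframe pattern above rebuilds a dense detector frame from the
# compressed (pixel index, value) pairs. A minimal, self-contained sketch of that
# decoding step; pos/vals and the detector shape are assumed inputs, not pyCHX API:
import numpy as np

def decode_sparse_frame(pos, vals, rows, cols):
    """Scatter the stored nonzero pixel values back into a dense 2D image."""
    img = np.zeros(rows * cols)
    img[pos] = vals  # pos: flat pixel indices; vals: counts recorded at those pixels
    return img.reshape(rows, cols)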
diff --git a/pyCHX/Create_Report.py b/pyCHX/Create_Report.py index bfb7b30..a4a3dd2 100644 --- a/pyCHX/Create_Report.py +++ b/pyCHX/Create_Report.py @@ -20,18 +20,14 @@ def check_dict_keys(dicts, key): import os -import sys -from datetime import datetime -from time import time import h5py import numpy as np import pandas as pds from PIL import Image -from reportlab.lib.colors import black, blue, brown, green, pink, red, white -from reportlab.lib.pagesizes import A4, letter +from reportlab.lib.colors import black, blue, red +from reportlab.lib.pagesizes import letter from reportlab.lib.styles import getSampleStyleSheet -from reportlab.lib.units import cm, inch, mm from reportlab.pdfgen import canvas from pyCHX.chx_generic_functions import pload_obj @@ -57,8 +53,8 @@ def add_image_string( height = img_height width = height / ratio # if width>400: - # width = 350 - # height = width*ratio + # width = 350 + # height = width*ratio c.drawImage(image, img_left, img_top, width=width, height=height, mask=None) c.setFont("Helvetica", 16) @@ -203,11 +199,11 @@ def load_metadata(self): else: uid_OneTime = uid + "_fra_%s_%s" % (beg_OneTime, end_OneTime) if beg_TwoTime is None: - uid_TwoTime = uid + "_fra_%s_%s" % (beg, end) + uid + "_fra_%s_%s" % (beg, end) else: - uid_TwoTime = uid + "_fra_%s_%s" % (beg_TwoTime, end_TwoTime) + uid + "_fra_%s_%s" % (beg_TwoTime, end_TwoTime) - except: + except Exception: uid_ = uid uid_OneTime = uid if beg is None: @@ -358,7 +354,7 @@ def report_invariant(self, top=300, new_page=False): ROI on average intensity image ROI on circular average """ - uid = self.uid + self.uid c = self.c # add sub-title, static images c.setFillColor(black) @@ -451,7 +447,7 @@ def report_meta(self, top=740, new_page=False): md = self.md try: uid = md["uid"] - except: + except Exception: uid = self.uid # add sub-title, metadata c.setFont("Helvetica", 20) @@ -489,25 +485,25 @@ def report_meta(self, top=740, new_page=False): try: # try exp time from detector exposuretime = md["count_time"] # exposure time in sec - except: + except Exception: exposuretime = md["cam_acquire_time"] # exposure time in sec try: # try acq time from detector acquisition_period = md["frame_time"] - except: + except Exception: try: acquisition_period = md["acquire period"] - except: + except Exception: uid = md["uid"] acquisition_period = float(db[uid]["start"]["acquire period"]) s = [] - s.append("UID: %s" % uid) ###line 1, for uid - s.append("Sample: %s" % md["sample"]) ####line 2 sample + s.append("UID: %s" % uid) # line 1, for uid + s.append("Sample: %s" % md["sample"]) # line 2 sample s.append( "Data Acquisition From: %s To: %s" % (md["start_time"], md["stop_time"]) - ) ####line 3 Data Acquisition time - s.append("Measurement: %s" % md["Measurement"]) ####line 4 'Measurement + ) # line 3 Data Acquisition time + s.append("Measurement: %s" % md["Measurement"]) # line 4 'Measurement # print( md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) # print(acquisition_period) @@ -519,12 +515,12 @@ def report_meta(self, top=740, new_page=False): round(float(exposuretime) * 1000, 4), round(float(acquisition_period) * 1000, 4), ) - ) ####line 5 'lamda... + ) # line 5 'lamda... s.append( "Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s" % (md["detector_distance"], md["feedback_x"], md["feedback_y"], md["shutter mode"]) - ) ####line 6 'Detector-Sample Distance.. + ) # line 6 'Detector-Sample Distance.. 
if self.report_type == "saxs": s7 = "Beam Center: [%s, %s] (pixel)" % (md["beam_center_x"], md["beam_center_y"]) elif self.report_type == "gi_saxs": @@ -540,22 +536,22 @@ def report_meta(self, top=740, new_page=False): s7 += " || " + "BadLen: %s" % len(md["bad_frame_list"]) s7 += " || " + "Transmission: %s" % md["transmission"] - s.append(s7) ####line 7 'Beam center... + s.append(s7) # line 7 'Beam center... m = "Mask file: %s" % md["mask_file"] + " || " + "ROI mask file: %s" % md["roi_mask_file"] - # s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename - # s.append( ) ####line 8 mask filename + # s.append( 'Mask file: %s'%md['mask_file'] ) #line 8 mask filename + # s.append( ) #line 8 mask filename s.append(m) if self.res_h5_filename is not None: self.data_dir_ = self.data_dir + self.res_h5_filename else: self.data_dir_ = self.data_dir - s.append("Analysis Results Dir: %s" % self.data_dir_) ####line 9 results folder + s.append("Analysis Results Dir: %s" % self.data_dir_) # line 9 results folder - s.append("Metadata Dir: %s.csv-&.pkl" % self.metafile) ####line 10 metadata folder + s.append("Metadata Dir: %s.csv-&.pkl" % self.metafile) # line 10 metadata folder try: - s.append("Pipeline notebook: %s" % md["NOTEBOOK_FULL_PATH"]) ####line 11 notebook folder - except: + s.append("Pipeline notebook: %s" % md["NOTEBOOK_FULL_PATH"]) # line 11 notebook folder + except Exception: pass # print( 'here' ) line = 1 @@ -578,7 +574,7 @@ def report_static(self, top=560, new_page=False, iq_fit=False): c = self.c c.setFont("Helvetica", 20) - uid = self.uid + self.uid ds = 220 self.sub_title_num += 1 @@ -668,7 +664,7 @@ def report_ROI(self, top=300, new_page=False): ROI on average intensity image ROI on circular average """ - uid = self.uid + self.uid c = self.c # add sub-title, static images c.setFillColor(black) @@ -731,11 +727,10 @@ def report_time_analysis(self, top=720, new_page=False): mean intensity of each ROI as a function of time """ c = self.c - uid = self.uid + self.uid # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) top1 = top - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. Time Dependent Plot" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -765,7 +760,7 @@ def report_time_analysis(self, top=720, new_page=False): # plot iq~t if self.report_type == "saxs": imgf = self.Iq_t_file - image = self.data_dir + imgf + self.data_dir + imgf img_height = 140 img_left, img_top = 350, top @@ -846,11 +841,10 @@ def report_oavs(self, top=350, oavs_file=None, new_page=False): """create the oavs images report""" c = self.c - uid = self.uid + self.uid # add sub-title, One Time Correlation Function c.setFillColor(black) c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. OAVS Images" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -906,11 +900,10 @@ def report_one_time(self, top=350, g2_fit_file=None, q_rate_file=None, new_page= """ c = self.c - uid = self.uid + self.uid # add sub-title, One Time Correlation Function c.setFillColor(black) c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. One Time Correlation Function" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -1092,11 +1085,10 @@ def report_mulit_one_time(self, top=720, new_page=False): q-rate fit """ c = self.c - uid = self.uid + self.uid # add sub-title, One Time Correlation Function c.setFillColor(black) c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. 
One Time Correlation Function" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -1148,11 +1140,10 @@ def report_two_time(self, top=720, new_page=False): two one-time correlatoin function from multi-one-time and from diagonal two-time """ c = self.c - uid = self.uid + self.uid # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. Two Time Correlation Function" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -1251,11 +1242,10 @@ def report_four_time(self, top=720, new_page=False): """ c = self.c - uid = self.uid + self.uid # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. Four Time Correlation Function" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -1288,10 +1278,9 @@ def report_four_time(self, top=720, new_page=False): def report_dose(self, top=720, new_page=False): c = self.c - uid = self.uid + self.uid # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. Dose Analysis" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -1335,11 +1324,10 @@ def report_flow_pv_g2(self, top=720, new_page=False): two one-time correlatoin function from multi-one-time and from diagonal two-time """ c = self.c - uid = self.uid + self.uid # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. Flow One Time Analysis" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -1349,7 +1337,7 @@ def report_flow_pv_g2(self, top=720, new_page=False): # add xsvs fit imgf = self.flow_g2v - image = self.data_dir + imgf + self.data_dir + imgf img_height = 300 img_left, img_top = 80, top @@ -1399,11 +1387,10 @@ def report_flow_pv_two_time(self, top=720, new_page=False): two one-time correlatoin function from multi-one-time and from diagonal two-time """ c = self.c - uid = self.uid + self.uid # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. Flow One &Two Time Comparison" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -1414,7 +1401,7 @@ def report_flow_pv_two_time(self, top=720, new_page=False): if False: imgf = self.two_time - image = self.data_dir + imgf + self.data_dir + imgf img_height = 300 img_left, img_top = 80, top @@ -1484,11 +1471,10 @@ def report_xsvs(self, top=720, new_page=False): two one-time correlatoin function from multi-one-time and from diagonal two-time """ c = self.c - uid = self.uid + self.uid # add sub-title, Time-dependent plot c.setFont("Helvetica", 20) - ds = 20 self.sub_title_num += 1 c.drawString(10, top, "%s. 
Visibility Analysis" % self.sub_title_num) # add title c.setFont("Helvetica", 14) @@ -1571,8 +1557,8 @@ def save_page(self): c.save() def done(self): - out_dir = self.out_dir - uid = self.uid + self.out_dir + self.uid print() print("*" * 40) @@ -1589,7 +1575,6 @@ def create_multi_pdf_reports_for_uids(uids, g2, data_dir, report_type="saxs", ap Save pdf report in data dir """ for key in list(g2.keys()): - i = 1 for sub_key in list(g2[key].keys()): uid_i = uids[key][sub_key] data_dir_ = os.path.join(data_dir, "%s/" % uid_i) @@ -1627,7 +1612,6 @@ def create_one_pdf_reports_for_uids(uids, g2, data_dir, filename="all_in_one", r page = 1 for key in list(g2.keys()): - i = 1 for sub_key in list(g2[key].keys()): uid_i = uids[key][sub_key] data_dir_ = os.path.join(data_dir, "%s/" % uid_i) @@ -1662,10 +1646,10 @@ def save_res_h5(full_uid, data_dir, save_two_time=False): for key in md.keys(): try: meta_data.attrs[key] = md[key] - except: + except Exception: pass - shapes = md["avg_img"].shape + md["avg_img"].shape avg_h5 = hf.create_dataset("avg_img", data=md["avg_img"]) mask_h5 = hf.create_dataset("mask", data=md["mask"]) roi_h5 = hf.create_dataset("roi", data=md["ring_mask"]) @@ -1706,7 +1690,7 @@ def load_res_h5(full_uid, data_dir): g2b_h5 = np.array(hf.get("g2b")) taus2_h5 = np.array(hf.get("taus2")) if "g12b" in hf: - g12b_h5 = np.array(hf.get("g12b")) + np.array(hf.get("g12b")) if "g12b" in hf: return meta_data, avg_h5, mask_h5, roi_h5, g2_h5, taus_h5, g2b_h5, taus2_h5, g12b @@ -1753,7 +1737,7 @@ def make_pdf_report( c.report_static(top=540, iq_fit=run_fit_form) c.report_ROI(top=290) page = 1 - ##Page Two for plot OVAS images if oavs_report is True + # Page Two for plot OVAS images if oavs_report is True if oavs_report: c.new_page() c.report_header(page=2) @@ -1821,8 +1805,8 @@ def make_pdf_report( return c -###################################### -###Deal with saving dict to hdf5 file +# +# Deal with saving dict to hdf5 file def save_dict_to_hdf5(dic, filename): """ .... @@ -1870,7 +1854,7 @@ def recursively_save_dict_contents_to_group(h5file, path, dic): elif isinstance(item, np.ndarray): try: h5file[path + key] = item - except: + except Exception: item = np.array(item).astype("|S9") h5file[path + key] = item if not np.array_equal(h5file[path + key].value, item): @@ -1921,13 +1905,13 @@ def export_xpcs_results_to_h5(filename, export_dir, export_dict): for key_ in md.keys(): try: meta_data.attrs[str(key_)] = md[key_] - except: + except Exception: pass elif key in dict_nest: # print(key) try: recursively_save_dict_contents_to_group(hf, "/%s/" % key, export_dict[key]) - except: + except Exception: print("Can't export the key: %s in this dataset." 
% key) elif key in ["g2_fit_paras", "g2b_fit_paras", "spec_km_pds", "spec_pds", "qr_1d_pds"]: @@ -1937,7 +1921,7 @@ def export_xpcs_results_to_h5(filename, export_dir, export_dict): key=key, mode="a", ) - except: + except Exception: flag = True else: data = hf.create_dataset(key, data=export_dict[key]) @@ -2004,7 +1988,7 @@ def extract_xpcs_results_from_h5_debug(filename, import_dir, onekey=None, exclud try: with h5py.File(fp, "r") as hf: extract_dict[onekey] = np.array(hf.get(onekey)) - except: + except Exception: print("The %s doesn't have this %s value" % (fp, onekey)) return extract_dict @@ -2032,7 +2016,7 @@ def export_xpcs_results_to_h5_old(filename, export_dir, export_dict): for key_ in md.keys(): try: meta_data.attrs[str(key_)] = md[key_] - except: + except Exception: pass elif key in dict_nest: k1 = export_dict[key] @@ -2064,7 +2048,6 @@ def extract_xpcs_results_from_h5(filename, import_dir, onekey=None, exclude_keys extract_dict: dict, with keys such as md, g2, g4, et al. """ - import numpy as np import pandas as pds extract_dict = {} @@ -2122,7 +2105,7 @@ def extract_xpcs_results_from_h5(filename, import_dir, onekey=None, exclude_keys else: extract_dict[key] = hf.get(key)[:] # np.array( hf.get( key )) # extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey )) - except: + except Exception: print("The %s doesn't have this %s value" % (fp, onekey)) return extract_dict diff --git a/pyCHX/DEVs.py b/pyCHX/DEVs.py index 6e89cda..a76c199 100644 --- a/pyCHX/DEVs.py +++ b/pyCHX/DEVs.py @@ -1,7 +1,6 @@ # simple brute force multitau # from pyCHX.chx_generic_functions import average_array_withNan import numpy as np -import skbeam.core.roi as roi from numpy.fft import fft, ifft from tqdm import tqdm @@ -67,7 +66,7 @@ def plot_xy_with_fit(x, y, xf, out, xlim=[1e-3, 0.01], xlabel="q (" r"$\AA^{-1}$ return ax -#############For APD detector +# For APD detector def get_pix_g2_fft(time_inten): """YG Dev@CHX 2018/12/4 get g2 for oneD intensity g2 = G/(P*F) @@ -125,7 +124,7 @@ def get_pix_g2_PF(time_inten): return P, F -################### +# def get_ab_correlation(a, b): @@ -198,9 +197,9 @@ def auto_correlation_fft_padding_zeros(a, axis=-1): Based on auto_cor(arr) = ifft( fft( arr ) * fft(arr[::-1]) ) In numpy form auto_cor(arr) = ifft( - fft( arr, n=2N-1, axis=axis ) ##padding enough zeros - ## for axis - * np.conjugate( ## conju for reverse array + fft( arr, n=2N-1, axis=axis ) #padding enough zeros + # for axis + * np.conjugate( # conju for reverse array fft(arr , n=2N-1, axis=axis) ) ) #do reverse fft Input: @@ -238,9 +237,9 @@ def auto_correlation_fft(a, axis=-1): Based on auto_cor(arr) = ifft( fft( arr ) * fft(arr[::-1]) ) In numpy form auto_cor(arr) = ifft( - fft( arr, n=2N-1, axis=axis ) ##padding enough zeros - ## for axis - * np.conjugate( ## conju for reverse array + fft( arr, n=2N-1, axis=axis ) #padding enough zeros + # for axis + * np.conjugate( # conju for reverse array fft(arr , n=2N-1, axis=axis) ) ) #do reverse fft Input: @@ -273,7 +272,7 @@ def multitau(Ipix, bind, lvl=12, nobuf=8): plot(tt[1:],g2[1:,i]) will plot each g2.
""" # if num_lev is None: - # num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + # num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 # print(nobuf,nolvl) nobins = bind.max() + 1 nobufov2 = nobuf // 2 @@ -325,14 +324,14 @@ def average_array_withNan(array, axis=0, mask=None): Output: avg: averaged array along axis """ - shape = array.shape + array.shape if mask is None: mask = np.isnan(array) # mask = np.ma.masked_invalid(array).mask array_ = np.ma.masked_array(array, mask=mask) try: sums = np.array(np.ma.sum(array_[:, :], axis=axis)) - except: + except Exception: sums = np.array(np.ma.sum(array_[:], axis=axis)) cts = np.sum(~mask, axis=axis) @@ -399,8 +398,8 @@ def autocor_for_pix_time(pix_time_data, dly_dict, pixel_norm=None, frame_norm=No # IF_mask = mask_pix[tau: Nt,: ] # IPF_mask = IP_mask | IF_mask # IPFm = average_array_withNan(IP*IF, axis = 0, )#mask= IPF_mask ) - # IPm = average_array_withNan(IP, axis = 0, )# mask= IP_mask ) - # IFm = average_array_withNan(IF, axis = 0 , )# mask= IF_mask ) + # IPm = average_array_withNan(IP, axis = 0, )# mask= IP_mask ) + # IFm = average_array_withNan(IF, axis = 0 , )# mask= IF_mask ) G2[tau_ind] = average_array_withNan( IP * IF, axis=0, @@ -415,8 +414,8 @@ def autocor_for_pix_time(pix_time_data, dly_dict, pixel_norm=None, frame_norm=No ) # IFm tau_ind += 1 # for i in range(G2.shape[0]-1, 0, -1): - # if np.isnan(G2[i,0]): - # gmax = i + # if np.isnan(G2[i,0]): + # gmax = i gmax = tau_ind return G2[:gmax, :], Gp[:gmax, :], Gf[:gmax, :] @@ -434,7 +433,7 @@ def autocor_xytframe(self, n): return crl / (IP * IF) * FN -###################For Fit +# For Fit import matplotlib.pyplot as plt import numpy as np @@ -480,12 +479,12 @@ def _residuals(p, x, y, sigy, pall, adj, fun): def fitpr(chisq, a, sigmaa, title=None, lbl=None): """nicely print out results of a fit""" # get fitted results. - if lbl == None: + if lbl is None: lbl = [] for i in xrange(a.size): lbl.append("A%(#)02d" % {"#": i}) # print resuls of a fit. - if title != None: + if title is not None: print(title) print(" chisq=%(c).4f" % {"c": chisq}) for i in range(a.size): @@ -516,7 +515,7 @@ def Gaussian(x, p): return g -###########For ellipse shaped sectors by users +# For ellipse shaped sectors by users def elps_r(a, b, theta): """ Returns the radius of an ellipse with semimajor/minor axes a/b diff --git a/pyCHX/DataGonio.py b/pyCHX/DataGonio.py index b8603ef..f9b674f 100644 --- a/pyCHX/DataGonio.py +++ b/pyCHX/DataGonio.py @@ -1,17 +1,10 @@ # import sys -import os -import re # Regular expressions -import sys -import matplotlib as mpl import numpy as np # from scipy.optimize import leastsq # import scipy.special import PIL # Python Image Library (for opening PNG, etc.) 
-import pylab as plt -import skbeam.core.correlation as corr -import skbeam.core.roi as roi import skbeam.core.utils as utils from skbeam.core.accumulators.binned_statistic import BinnedStatistic1D, BinnedStatistic2D @@ -195,7 +188,7 @@ def convert_Qmap_old(img, qx_map, qy_map=None, bins=None, rangeq=None): # Mask -################################################################################ +# class Mask(object): """Stores the matrix of pixels to be excluded from further analysis.""" @@ -264,11 +257,11 @@ def invert(self): self.data = -1 * (self.data - 1) # End class Mask(object) - ######################################## + # # Calibration -################################################################################ +# class Calibration(object): """Stores aspects of the experimental setup; especially the calibration parameters for a particular detector. That is, the wavelength, detector @@ -289,7 +282,7 @@ def __init__(self, wavelength_A=None, distance_m=None, pixel_size_um=None): self.clear_maps() # Experimental parameters - ######################################## + # def set_wavelength(self, wavelength_A): """Set the experimental x-ray wavelength (in Angstroms).""" @@ -385,7 +378,7 @@ def get_q_per_pixel(self): return self.q_per_pixel # Maps - ######################################## + # def clear_maps(self): self.r_map_data = None @@ -491,7 +484,7 @@ def _generate_qxyz_maps(self): x = np.arange(self.width) - self.x0 y = np.arange(self.height) - self.y0 X, Y = np.meshgrid(x, y) - R = np.sqrt(X**2 + Y**2) + np.sqrt(X**2 + Y**2) # twotheta = np.arctan(self.r_map()*c) # radians theta_f = np.arctan2(X * c, 1) # radians @@ -507,11 +500,11 @@ def _generate_qxyz_maps(self): ) # End class Calibration(object) - ######################################## + # # CalibrationGonio -################################################################################ +# class CalibrationGonio(Calibration): """ The geometric claculations used here are described: @@ -520,7 +513,7 @@ class CalibrationGonio(Calibration): """ # Experimental parameters - ######################################## + # def set_angles( self, det_phi_g=0.0, det_theta_g=0.0, sam_phi=0, sam_chi=0, sam_theta=0, offset_x=0, offset_y=0, offset_z=0 @@ -604,7 +597,7 @@ def get_ratioDw(self): return self.distance_m / (width_mm / 1000.0) # Maps - ######################################## + # def q_map(self): if self.q_map_data is None: @@ -646,7 +639,7 @@ def _generate_qxyz_maps_no_offest(self): ) qz_c = -1 * k_over_Dprime * (d * np.sin(theta_g) + Y_c * np.cos(theta_g)) - qr_c = np.sqrt(np.square(qx_c) + np.square(qy_c)) + np.sqrt(np.square(qx_c) + np.square(qy_c)) q_c = np.sqrt(np.square(qx_c) + np.square(qy_c) + np.square(qz_c)) # Conversion factor for pixel coordinates @@ -656,7 +649,7 @@ def _generate_qxyz_maps_no_offest(self): x = np.arange(self.width) - self.x0 y = np.arange(self.height) - self.y0 X, Y = np.meshgrid(x, y) - R = np.sqrt(X**2 + Y**2) + np.sqrt(X**2 + Y**2) # twotheta = np.arctan(self.r_map()*c) # radians theta_f = np.arctan2(X * c, 1) # radians @@ -742,7 +735,7 @@ def _generate_qxyz_maps(self): x = np.arange(self.width) - self.x0 y = np.arange(self.height) - self.y0 X, Y = np.meshgrid(x, y) - R = np.sqrt(X**2 + Y**2) + np.sqrt(X**2 + Y**2) # twotheta = np.arctan(self.r_map()*c) # radians theta_f = np.arctan2(X * c, 1) # radians diff --git a/pyCHX/SAXS.py b/pyCHX/SAXS.py index fc2f54a..1d84051 100644 --- a/pyCHX/SAXS.py +++ b/pyCHX/SAXS.py @@ -5,16 +5,16 @@ """ # import numpy as np -from lmfit import Model, Parameter, 
Parameters, fit_report, minimize, report_fit -from scipy.optimize import curve_fit, least_squares, leastsq -from scipy.special import gamma, gammaln +from lmfit import Model, Parameters, minimize +from scipy.optimize import leastsq +from scipy.special import gamma from pyCHX.chx_generic_functions import find_index, plot1D, show_img # import matplotlib as mpl # import matplotlib.pyplot as plt # from matplotlib.colors import LogNorm -from pyCHX.chx_libs import * +from pyCHX.chx_libs import ff, iq, np, plt, q, utils, y def mono_sphere_form_factor_intensity(x, radius, delta_rho=100, fit_func="G"): @@ -120,10 +120,10 @@ def poly_sphere_form_factor_intensity_q2( def find_index_old(x, x0, tolerance=None): # find the position of P in a list (plist) with tolerance - N = len(x) + len(x) i = 0 position = None - if tolerance == None: + if tolerance is None: tolerance = (x[1] - x[0]) / 2.0 if x0 > max(x): position = len(x) - 1 @@ -471,7 +471,7 @@ def get_form_factor_fit2( # print(q4_bg) # resL = leastsq( fit_funcs, [ p ], args=( iq_, q_, num_points, spread, fit_func, function ), - # full_output=1, ftol=1.49012e-38, xtol=1.49012e-10, factor=100) + # full_output=1, ftol=1.49012e-38, xtol=1.49012e-10, factor=100) # radius, sigma, delta_rho, background = np.abs(pfit) if not q4_bg: @@ -513,7 +513,7 @@ def get_form_factor_fit2( for i in range(len(pfit)): try: error.append(np.absolute(pcov[i][i]) ** 0.5) - except: + except Exception: error.append(None) pfit_leastsq = pfit perr_leastsq = np.array(error) @@ -603,13 +603,13 @@ def get_form_factor_fit( # fit_power = 0 result = mod.fit(iq_ * q_**fit_power, pars, x=q_) # , fit_func=fit_func ) if function == "poly_sphere": - sigma = result.best_values["sigma"] + result.best_values["sigma"] elif function == "mono_sphere": - sigma = 0 - r = result.best_values["radius"] + pass + result.best_values["radius"] # scale = result.best_values['scale'] # baseline = result.best_values['baseline'] - delta_rho = result.best_values["delta_rho"] + result.best_values["delta_rho"] print(result.best_values) return result, q_ @@ -636,7 +636,7 @@ def plot_form_factor_with_fit(q, iq, q_, result, fit_power=0, res_pargs=None, re plt.title("uid= %s:--->" % uid + title_qr, fontsize=20, y=1.02) r = result.best_values["radius"] - delta_rho = result.best_values["delta_rho"] + result.best_values["delta_rho"] sigma = result.best_values["sigma"] ax.semilogy(q, iq, "ro", label="Form Factor") @@ -920,7 +920,7 @@ def show_saxs_qmap( if w < minW: img_ = img[cx - w // 2 : cx + w // 2, cy + w // 2 : cy + w // 2] # elif w > maxW: - # img_[ cx-w//2:cx+w//2, cy+w//2:cy+w//2 ] = + # img_[ cx-w//2:cx+w//2, cy+w//2:cy+w//2 ] = ROI = [max(0, center[0] - w), min(center[0] + w, lx), max(0, center[1] - w), min(ly, center[1] + w)] # print( ROI ) @@ -984,12 +984,12 @@ def show_saxs_qmap( return ax -######################## -##Fit sphere by scipy.leastsq fit +# +# Fit sphere by scipy.leastsq fit def fit_sphere_form_factor_func(parameters, ydata, xdata, yerror=None, nonvariables=None): - """##Develop by YG at July 28, 2017 @CHX + """#Develop by YG at July 28, 2017 @CHX This function is for fitting form factor of polyderse spherical particles by using scipy.leastsq fit radius, sigma, delta_rho, background = parameters @@ -1015,7 +1015,7 @@ def fit_sphere_form_factor_by_leastsq( pq, fit_range=None, ): - """##Develop by YG at July 28, 2017 @CHX + """#Develop by YG at July 28, 2017 @CHX Fitting form factor of polyderse spherical particles by using scipy.leastsq fit Input: radius, sigma, delta_rho, background = p0 @@ 
-1041,7 +1041,7 @@ def fit_sphere_form_factor_by_leastsq( def plot_fit_sphere_form_factor(q, pq, res, p0=None, xlim=None, ylim=None): - """##Develop by YG at July 28, 2017 @CHX""" + """#Develop by YG at July 28, 2017 @CHX""" if p0 is not None: radius, sigma, delta_rho, background = p0 diff --git a/pyCHX/Stitching.py b/pyCHX/Stitching.py index e78bdd3..6a01c34 100644 --- a/pyCHX/Stitching.py +++ b/pyCHX/Stitching.py @@ -1,13 +1,11 @@ -import os import re -import sys import matplotlib.pyplot as plt import numpy as np import PIL from scipy.signal import savgol_filter as sf -from pyCHX.chx_generic_functions import plot1D, show_img +from pyCHX.chx_generic_functions import plot1D from pyCHX.DataGonio import convert_Qmap @@ -95,7 +93,7 @@ def Correct_Overlap_Images_Intensities( fig = plt.figure()# figsize=[2,8]) for i in range(len(infiles)): - #print(i) + # print(i) ax = fig.add_subplot(1,8, i+1) d = process.load( infiles[i] ) show_img( dataM[i], logs = True, show_colorbar= False,show_ticks =False, @@ -180,14 +178,14 @@ def stitch_WAXS_in_Qspace(dataM, phis, calibration, dx=0, dy=22, dz=0, dq=0.015, phi_offset=4.649, phi_start=1.0, phi_spacing=5.0,) for infile in infiles] ) # For TWD data calibration = CalibrationGonio(wavelength_A=0.619920987) # 20.0 keV - #calibration.set_image_size( data.shape[1], data.shape[0] ) + # calibration.set_image_size( data.shape[1], data.shape[0] ) calibration.set_image_size(195, height=1475) # Pilatus300kW vertical calibration.set_pixel_size(pixel_size_um=172.0) calibration.set_beam_position(97.0, 1314.0) calibration.set_distance(0.275) Intensity_map, qxs, qzs = stitch_WAXS_in_Qspace( dataM, phis, calibration) - #Get center of the qmap + # Get center of the qmap bx,by = np.argmin( np.abs(qxs) ), np.argmin( np.abs(qzs) ) print( bx, by ) @@ -214,7 +212,7 @@ def stitch_WAXS_in_Qspace(dataM, phis, calibration, dx=0, dy=22, dz=0, dq=0.015, # Intensity_mapN = np.zeros( (8, len(qzs), len(qxs)) ) for i in range(len(phis)): dM = np.rot90(dataM[i].T) - D = dM.ravel() + dM.ravel() phi = phis[i] calibration.set_angles(det_phi_g=phi, det_theta_g=0.0, offset_x=dx, offset_y=dy, offset_z=dz) calibration.clear_maps() @@ -321,7 +319,7 @@ def get_phi(filename, phi_offset=0, phi_start=4.5, phi_spacing=4.0, polarity=-1, return phi_c -############For CHX beamline +# For CHX beamline def get_qmap_qxyz_range( @@ -418,14 +416,14 @@ def stitch_WAXS_in_Qspace_CHX( phi_offset=4.649, phi_start=1.0, phi_spacing=5.0,) for infile in infiles] ) # For TWD data calibration = CalibrationGonio(wavelength_A=0.619920987) # 20.0 keV - #calibration.set_image_size( data.shape[1], data.shape[0] ) + # calibration.set_image_size( data.shape[1], data.shape[0] ) calibration.set_image_size(195, height=1475) # Pilatus300kW vertical calibration.set_pixel_size(pixel_size_um=172.0) calibration.set_beam_position(97.0, 1314.0) calibration.set_distance(0.275) Intensity_map, qxs, qzs = stitch_WAXS_in_Qspace( dataM, phis, calibration) - #Get center of the qmap + # Get center of the qmap bx,by = np.argmin( np.abs(qxs) ), np.argmin( np.abs(qzs) ) print( bx, by ) """ diff --git a/pyCHX/Two_Time_Correlation_Function.py b/pyCHX/Two_Time_Correlation_Function.py index a110211..4e7f16c 100644 --- a/pyCHX/Two_Time_Correlation_Function.py +++ b/pyCHX/Two_Time_Correlation_Function.py @@ -1,31 +1,27 @@ -###################################################################################### -########Dec 16, 2015, Yugang Zhang, yuzhang@bnl.gov, CHX, NSLS-II, BNL################ -########Time correlation function, include one-time, 
two-time, four-time############## -########Muli-tau method, array-operation method####################################### -###################################################################################### +# +# Dec 16, 2015, Yugang Zhang, yuzhang@bnl.gov, CHX, NSLS-II, BNL# +# Time correlation function, include one-time, two-time, four-time# +# Muli-tau method, array-operation method# +# -import itertools -import sys import time -from datetime import datetime import matplotlib.pyplot as plt import numpy as np import skbeam.core.roi as roi from matplotlib import gridspec from matplotlib.colors import LogNorm -from modest_image import ModestImage, imshow +from modest_image import imshow from tqdm import tqdm # from pyCHX.chx_libs import colors_ as mcolors, markers_ as markers from pyCHX.chx_libs import RUN_GUI, Figure from pyCHX.chx_libs import colors from pyCHX.chx_libs import colors as colors_array -from pyCHX.chx_libs import lstyles from pyCHX.chx_libs import markers from pyCHX.chx_libs import markers as markers_array -from pyCHX.chx_libs import markers_copy, mcolors, multi_tau_lags +from pyCHX.chx_libs import multi_tau_lags def delays(num_lev=3, num_buf=4, time=1): @@ -71,7 +67,7 @@ def __init__(self, indexable, pixelist): # self.shape = indexable.shape try: self.length = len(indexable) - except: + except Exception: self.length = indexable.length def get_data(self): @@ -95,7 +91,7 @@ def __init__(self, indexable, mask): self.mask = mask try: self.shape = indexable.shape - except: + except Exception: # if self.shape = [len(indexable), indexable[0].shape[0], indexable[0].shape[1]] # self.shape = indexable.shape @@ -239,8 +235,7 @@ def auto_two_Array(data, rois, data_pixel=None): noframes = data_pixel.shape[0] g12b = np.zeros([noframes, noframes, noqs]) - Unitq = noqs / 10 - proi = 0 + noqs / 10 for qi in tqdm(range(1, noqs + 1)): pixelist_qi = np.where(qind == qi)[0] @@ -252,10 +247,10 @@ def auto_two_Array(data, rois, data_pixel=None): g12b[:, :, qi - 1] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] # print ( proi, int( qi //( Unitq) ) ) - # if int( qi //( Unitq) ) == proi: - # sys.stdout.write("#") - # sys.stdout.flush() - # proi += 1 + # if int( qi //( Unitq) ) == proi: + # sys.stdout.write("#") + # sys.stdout.flush() + # proi += 1 elapsed_time = time.time() - start_time print("Total time: %.2f min" % (elapsed_time / 60.0)) @@ -263,14 +258,14 @@ def auto_two_Array(data, rois, data_pixel=None): return g12b -#################################### -##Derivation of Two time correlation -##################################### +# +# Derivation of Two time correlation +# -##################################### +# # get one-time @different age -##################################### +# def get_qedge2(qstart, qend, qwidth, noqs, return_int=False): @@ -438,7 +433,7 @@ def get_aged_g2_from_g12q(g12q, age_edge, age_center=None, timeperframe=1, time_ arr = rotate_g12q_to_rectangle(g12q) m, n = arr.shape # m should be 2*n-1 # age_edge, age_center = get_qedge( qstart=slice_start,qend= slice_end, - # qwidth = slice_width, noqs =slice_num ) + # qwidth = slice_width, noqs =slice_num ) # print(arr.shape) age_edge = np.int_(age_edge) if age_center is None: @@ -592,7 +587,7 @@ def show_g12q_aged_g2( for i in range(len(age_center)): ps = linS1[1][i] - pe = linE1[0][i] + linE1[0][i] if ps >= N: s0 = ps - N s1 = N @@ -605,7 +600,7 @@ def show_g12q_aged_g2( # else:e0=pe;e1=0 ps = linS2[1][i] - pe = linE2[0][i] + linE2[0][i] if ps >= N: S0 = ps - N S1 = N @@ -705,7 +700,7 @@ def 
plot_aged_g2(g2_aged, tau=None, timeperframe=1, ylim=None, xlim=None): ax.set_ylim(xlim) -##################################### +# # get fout-time @@ -878,9 +873,9 @@ def histogram_taus(taus, hisbin=20, plot=True, timeperframe=1): return his -##################################### +# # get one-time -##################################### +# def get_one_time_from_two_time_old(g12, norms=None, nopr=None): @@ -1000,7 +995,7 @@ def get_four_time_from_two_time(g12, g2=None, rois=None): return g4f12 -###### +# def make_g12_mask(badframes_list, g12_shape): """ Dec 16, 2015, Y.G.@CHX @@ -1210,7 +1205,7 @@ def show_C12( else: timeperframe = 1 - if "timeoffset" in kwargs.keys(): ### added timeoffset here + if "timeoffset" in kwargs.keys(): # added timeoffset here timeoffset = kwargs["timeoffset"] else: timeoffset = 0 @@ -1253,7 +1248,7 @@ def show_C12( fig, ax = fig_ax # extent=[0, data.shape[0]*timeperframe, 0, data.shape[0]*timeperframe ] - extent = np.array([N1, N2, N1, N2]) * timeperframe + timeoffset ### added timeoffset to extend + extent = np.array([N1, N2, N1, N2]) * timeperframe + timeoffset # added timeoffset to extend if logs: im = imshow( diff --git a/pyCHX/XPCS_GiSAXS.py b/pyCHX/XPCS_GiSAXS.py index 8c57ff8..5d2c87f 100644 --- a/pyCHX/XPCS_GiSAXS.py +++ b/pyCHX/XPCS_GiSAXS.py @@ -6,16 +6,39 @@ from skbeam.core.accumulators.binned_statistic import BinnedStatistic1D, BinnedStatistic2D -from pyCHX.chx_compress import ( - Multifile, - compress_eigerdata, - get_avg_imgc, - init_compress_eigerdata, - read_compressed_eigerdata, -) +from pyCHX.chx_compress import Multifile, compress_eigerdata, get_avg_imgc from pyCHX.chx_correlationc import cal_g2c -from pyCHX.chx_generic_functions import * -from pyCHX.chx_libs import colors, colors_, markers, markers_ +from pyCHX.chx_generic_functions import ( + DataFrame, + LogNorm, + Model, + _vars, + apply_mask, + cal_g2, + cal_g2p, + datetime, + db, + get_detector, + get_each_frame_intensity, + get_qval_dict, + imgsar, + iqs, + lag_step_, + load_data, + make_axes_locatable, + mpl_plot, + np, + os, + plt, + psave_obj, + q, + qp_w, + reverse_updown, + roi, + save_arrays, + setup_pargs, +) +from pyCHX.chx_libs import colors, markers def get_gisaxs_roi2(qr_edge, qz_edge, qr_map, qz_map, mask=None, qval_dict=None): @@ -87,8 +110,8 @@ def get_gisaxs_roi(Qr, Qz, qr_map, qz_map, mask=None, qval_dict=None): return roi_mask, qval_dict -############ -##developed at Octo 11, 2016 +# +# developed at Octo 11, 2016 def get_qr(data, Qr, Qz, qr, qz, mask=None): """Octo 12, 2016, Y.G.@CHX plot one-d of I(q) as a function of qr for different qz @@ -102,12 +125,12 @@ def get_qr(data, Qr, Qz, qr, qz, mask=None): Return: qr_1d, a dataframe, with columns as qr1, qz1 (float value), qr2, qz2,.... 
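A hypothetical call, with Qr/Qz passed as (start, end, width, number) tuples
(matching the unpacking in get_t_qrc below) and the q-maps and mask prepared
beforehand; the variable names here are illustrative only:
    qr_1d = get_qr(avg_img, Qr=(qr_start, qr_end, qr_width, qr_num),
                   Qz=(qz_start, qz_end, qz_width, qz_num),
                   qr=qr_map, qz=qz_map, mask=mask)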
Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -159,9 +182,9 @@ def get_qr(data, Qr, Qz, qr, qz, mask=None): return df -######################## +# # get one-d of I(q) as a function of qr for different qz -##################### +# def cal_1d_qr( @@ -194,14 +217,14 @@ def cal_1d_qr( Plot 1D cureve as a function of Qr for each Qz Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -294,7 +317,7 @@ def get_t_qrc(FD, frame_edge, Qr, Qz, qr, qz, mask=None, path=None, uid=None, sa """ Nt = len(frame_edge) - iqs = list(np.zeros(Nt)) + list(np.zeros(Nt)) qz_start, qz_end, qz_width, qz_num = Qz qz_edge, qz_center = get_qedge(qz_start, qz_end, qz_width, qz_num, verbose=False) # print('here') @@ -433,9 +456,9 @@ def plot_t_qrc(qr_1d, frame_edge, save=False, pargs=None, fontsize=8, *argv, **k ) -########################################## -###Functions for GiSAXS -########################################## +# +# Functions for GiSAXS +# def make_gisaxs_grid(qr_w=10, qz_w=12, dim_r=100, dim_z=120): @@ -443,7 +466,7 @@ def make_gisaxs_grid(qr_w=10, qz_w=12, dim_r=100, dim_z=120): y, x = np.indices([dim_z, dim_r]) Nr = int(dim_r / qp_w) Nz = int(dim_z / qz_w) - noqs = Nr * Nz + Nr * Nz ind = 1 for i in range(0, Nr): @@ -453,9 +476,9 @@ def make_gisaxs_grid(qr_w=10, qz_w=12, dim_r=100, dim_z=120): return y -########################################### +# # for Q-map, convert pixel to Q -########################################### +# def convert_Qmap(img, qx_map, qy_map=None, bins=None, rangeq=None, mask=None, statistic="sum"): @@ -655,7 +678,6 @@ def get_qedge(qstart, qend, qwidth, noqs, verbose=True): return a qedge by giving the noqs, qstart,qend,qwidth. 
a qcenter, which is center of each qedge KEYWORD: None""" - import numpy as np if noqs != 1: spacing = (qend - qstart - noqs * qwidth) / (noqs - 1) # spacing between rings @@ -693,9 +715,9 @@ def get_qedge2( return qedge, qcenter -########################################### +# # for plot Q-map -########################################### +# def get_qmap_label(qmap, qedge): @@ -715,7 +737,7 @@ def get_qmap_label(qmap, qedge): def get_qzrmap(label_array_qz, label_array_qr, qz_center, qr_center): """April 20, 2016, Y.G.@CHX, get qzrmap""" - qzmax = label_array_qz.max() + label_array_qz.max() label_array_qr_ = np.zeros(label_array_qr.shape) ind = np.where(label_array_qr != 0) label_array_qr_[ind] = label_array_qr[ind] + 1e4 # add some large number to qr @@ -860,14 +882,14 @@ def get_1d_qr( Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -984,9 +1006,9 @@ def plot_qr_1d_with_ROI(qr_1d, qr_center, loglog=False, save=True, uid="uid", pa fig, ax = plt.subplots() Ncol = len(qr_1d.columns) - Nqr = Ncol % 2 + Ncol % 2 qz_center = qr_1d.columns[1::1] # qr_1d.columns[1::2] - Nqz = len(qz_center) + len(qz_center) for i, qzc_ in enumerate(qz_center): x = qr_1d[qr_1d.columns[0]] y = qr_1d[qzc_] @@ -1084,14 +1106,14 @@ def get_qr_tick_label(qr, label_array_qr, inc_x0, interp=True): w = np.where(rticks <= inc_x0)[0] rticks1 = np.int_(np.interp(np.round(rticks_label[w], 3), rticks_label[w], rticks[w])) rticks_label1 = np.round(rticks_label[w], 3) - except: + except Exception: rticks_label1 = [] try: w = np.where(rticks > inc_x0)[0] rticks2 = np.int_(np.interp(np.round(rticks_label[w], 3), rticks_label[w], rticks[w])) rticks = np.append(rticks1, rticks2) rticks_label2 = np.round(rticks_label[w], 3) - except: + except Exception: rticks_label2 = [] rticks_label = np.append(rticks_label1, rticks_label2) @@ -1170,16 +1192,16 @@ def get_qzr_map(qr, qz, inc_x0, Nzline=10, Nrline=10, interp=True, return_qrz_la labels_qz, indices_qz = roi.extract_label_indices(label_array_qz) labels_qr, indices_qr = roi.extract_label_indices(label_array_qr) - num_qz = len(np.unique(labels_qz)) - num_qr = len(np.unique(labels_qr)) + len(np.unique(labels_qz)) + len(np.unique(labels_qr)) zticks, zticks_label = get_qz_tick_label(qz, label_array_qz) # rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0) try: rticks, rticks_label = zip(*np.sort(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) - except: + except Exception: rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) # stride = int(len(zticks)/10) - ticks = [zticks, zticks_label, rticks, rticks_label] + [zticks, zticks_label, rticks, rticks_label] if return_qrz_label: return zticks, zticks_label, rticks, rticks_label, label_array_qr, label_array_qz else: @@ -1316,8 +1338,8 @@ def show_qzr_map(qr, qz, inc_x0, data=None, Nzline=10, Nrline=10, interp=True, * labels_qz, indices_qz = roi.extract_label_indices(label_array_qz) labels_qr, indices_qr = roi.extract_label_indices(label_array_qr) - num_qz = len(np.unique(labels_qz)) - num_qr = len(np.unique(labels_qr)) + len(np.unique(labels_qz)) + len(np.unique(labels_qr)) fig, ax = 
plt.subplots(figsize=(8, 14)) @@ -1352,7 +1374,7 @@ def show_qzr_map(qr, qz, inc_x0, data=None, Nzline=10, Nrline=10, interp=True, * # rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0) try: rticks, rticks_label = zip(*np.sort(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) - except: + except Exception: rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) # stride = int(len(zticks)/10) @@ -1821,7 +1843,7 @@ def save_gisaxs_g2(g2, res_pargs, time_label=False, taus=None, filename=None, *a try: qz_center = res_pargs["qz_center"] qr_center = res_pargs["qr_center"] - except: + except Exception: roi_label = res_pargs["roi_label"] path = res_pargs["path"] @@ -1835,7 +1857,7 @@ def save_gisaxs_g2(g2, res_pargs, time_label=False, taus=None, filename=None, *a for qz in qz_center: for qr in qr_center: columns.append([str(qz), str(qr)]) - except: + except Exception: columns.append([v for (k, v) in roi_label.items()]) df.columns = columns @@ -1914,10 +1936,10 @@ def fit_gisaxs_g2(g2, res_pargs, function="simple_exponential", one_plot=False, # uid=res_pargs['uid'] num_rings = g2.shape[1] - beta = np.zeros(num_rings) # contrast factor - rate = np.zeros(num_rings) # relaxation rate - alpha = np.zeros(num_rings) # alpha - baseline = np.zeros(num_rings) # baseline + beta = np.zeros(num_rings) # contrast factor + rate = np.zeros(num_rings) # relaxation rate + alpha = np.zeros(num_rings) # alpha + baseline = np.zeros(num_rings) # baseline if function == "simple_exponential" or function == "simple": _vars = np.unique(_vars + ["alpha"]) @@ -2141,7 +2163,7 @@ def fit_gisaxs_g2(g2, res_pargs, function="simple_exponential", one_plot=False, # GiSAXS End -############################### +# def get_each_box_mean_intensity(data_series, box_mask, sampling, timeperframe, plot_=True, *argv, **kwargs): @@ -2154,7 +2176,7 @@ def get_each_box_mean_intensity(data_series, box_mask, sampling, timeperframe, p mean_int_sets, index_list = roi.mean_intensity(np.array(data_series[::sampling]), box_mask) try: N = len(data_series) - except: + except Exception: N = data_series.length times = np.arange(N) * timeperframe # get the time for each frame num_rings = len(np.unique(box_mask)[1:]) @@ -2232,7 +2254,7 @@ def fit_qr_qz_rate(qr, qz, rate, plot_=True, *argv, **kwargs): for i, qz_ in enumerate(qz): try: y = np.array(rate["rate"][i * Nqr : (i + 1) * Nqr]) - except: + except Exception: y = np.array(rate[i * Nqr : (i + 1) * Nqr]) # print( len(x), len(y) ) @@ -2257,7 +2279,7 @@ def fit_qr_qz_rate(qr, qz, rate, plot_=True, *argv, **kwargs): ax.set_xlabel("$q^%s$" r"($\AA^{-2}$)" % power) dt = datetime.now() - CurTime = "%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) + "%s%02d%02d-%02d%02d-" % (dt.year, dt.month, dt.day, dt.hour, dt.minute) # fp = path + 'Q%s-Rate--uid=%s'%(power,uid) + CurTime + '--Fit.png' fp = path + "uid=%s--Q-Rate" % (uid) + "--fit-.png" fig.savefig(fp, dpi=fig.dpi) @@ -2465,7 +2487,7 @@ def multi_uids_gisaxs_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2473,12 +2495,12 @@ def multi_uids_gisaxs_xpcs_analysis( os.makedirs(data_dir_, exist_ok=True) i += 1 if imgs != 0: - Nimg = len(imgs) + len(imgs) md_ = imgs.md useful_uids[run_seq + 1][i] = uid imgsr = reverse_updown(imgs) - imgsra = apply_mask(imgsr, maskr) + apply_mask(imgsr, maskr) if compress: filename = 
"/XF11ID/analysis/Compressed_Data" + "/uid_%s.cmp" % uid @@ -2498,7 +2520,7 @@ def multi_uids_gisaxs_xpcs_analysis( md["Measurement"] = db[uid]["start"]["Measurement"] # md['sample']=db[uid]['start']['sample'] # print( md['Measurement'] ) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" @@ -2506,11 +2528,11 @@ def multi_uids_gisaxs_xpcs_analysis( lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms Ldet = md["detector_distance"] # detector to sample distance (mm), currently, *1000 for saxs, *1 for gisaxs - exposuretime = md["count_time"] + md["count_time"] acquisition_period = md["frame_time"] timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... setup_pargs = dict( uid=uid, dpix=dpix, Ldet=Ldet, lambda_=lambda_, timeperframe=timeperframe, path=data_dir ) diff --git a/pyCHX/XPCS_SAXS.py b/pyCHX/XPCS_SAXS.py index c59f6cc..6028ed3 100644 --- a/pyCHX/XPCS_SAXS.py +++ b/pyCHX/XPCS_SAXS.py @@ -7,21 +7,50 @@ import os from pandas import DataFrame -from scipy.special import erf - -from pyCHX.chx_compress_analysis import ( - Multifile, - compress_eigerdata, - get_avg_imgc, - get_each_ring_mean_intensityc, - init_compress_eigerdata, - mean_intensityc, - read_compressed_eigerdata, -) -from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq + +from pyCHX.chx_compress_analysis import Multifile, compress_eigerdata, get_avg_imgc +from pyCHX.chx_correlationc import cal_g2c, get_pixelist_interp_iq from pyCHX.chx_correlationp import cal_g2p -from pyCHX.chx_generic_functions import * -from pyCHX.chx_libs import RUN_GUI, Figure, colors, colors_, colors_copy, markers, markers_, markers_copy +from pyCHX.chx_generic_functions import ( + Model, + apply_mask, + cal_g2, + cmap_albula, + create_hot_pixel_mask, + datetime, + db, + fit_q_rate, + fit_saxs_g2, + flow_para_function, + get_avg_img, + get_detector, + get_each_frame_intensity, + get_g2_fit, + get_qval_dict, + lag_step_, + load_data, + make_axes_locatable, + np, + plot1D, + plot_g2, + plt, + psave_obj, + q_ring_center, + result, + roi, + save_arrays, + save_g2, + save_g2_fit_para_tocsv, + save_lists, + save_saxs_g2, + show_img, + show_label_array_on_image, + stretched_auto_corr_scat_factor, + stretched_auto_corr_scat_factor_with_vibration, + uid, + utils, +) +from pyCHX.chx_libs import RUN_GUI, Figure, colors, markers def get_iq_invariant(qt, iqst): @@ -139,7 +168,7 @@ def get_delta_img(img, mask, setup_pargs, img_name="xx", plot=False): def combine_ring_anglar_mask(ring_mask, ang_mask): """combine ring and anglar mask""" - ring_max = ring_mask.max() + ring_mask.max() ang_mask_ = np.zeros(ang_mask.shape) ind = np.where(ang_mask != 0) ang_mask_[ind] = ang_mask[ind] + 1e9 # add some large number to qr @@ -152,7 +181,7 @@ def combine_ring_anglar_mask(ring_mask, ang_mask): ura = np.unique(ring_ang)[1:] ur = np.unique(ring_mask)[1:] - ua = np.unique(ang_mask)[real_ang_lab] + np.unique(ang_mask)[real_ang_lab] # print( np.unique( ring_mask )[1:], np.unique( ang_mask )[1:], np.unique( ring_ang )[1:] ) ring_ang_ = np.zeros_like(ring_ang) @@ -237,7 +266,7 @@ def combine_two_roi_mask(ring_mask, ang_mask, pixel_num_thres=10): """ rf = np.ravel(ring_mask) - af = np.ravel(ang_mask) + np.ravel(ang_mask) ruiq = np.unique(ring_mask) auiq = np.unique(ang_mask) 
maxa = np.max(auiq) @@ -378,15 +407,15 @@ def circular_average( image_mask = np.ravel(image) # if nx is None: #make a one-pixel width q - # nx = int( max_r - min_r) + # nx = int( max_r - min_r) # if min_x is None: - # min_x= int( np.min( binr)) - # min_x_= int( np.min( binr)/(np.sqrt(pixel_size[1]*pixel_size[0] ))) + # min_x= int( np.min( binr)) + # min_x_= int( np.min( binr)/(np.sqrt(pixel_size[1]*pixel_size[0] ))) # if max_x is None: - # max_x = int( np.max(binr )) - # max_x_ = int( np.max(binr)/(np.sqrt(pixel_size[1]*pixel_size[0] )) ) + # max_x = int( np.max(binr )) + # max_x_ = int( np.max(binr)/(np.sqrt(pixel_size[1]*pixel_size[0] )) ) # if nx is None: - # nx = max_x_ - min_x_ + # nx = max_x_ - min_x_ # binr_ = np.int_( binr /(np.sqrt(pixel_size[1]*pixel_size[0] )) ) binr_ = binr / (np.sqrt(pixel_size[1] * pixel_size[0])) @@ -453,7 +482,7 @@ def get_circular_average( avg_img, center, threshold=0, nx=nx, pixel_size=(dpix, dpix), mask=mask, min_x=min_x, max_x=max_x ) qp_ = qp * dpix - # convert bin_centers from r [um] to two_theta and then to q [1/px] (reciprocal space) + # convert bin_centers from r [um] to two_theta and then to q [1/px] (reciprocal space) two_theta = utils.radius_to_twotheta(Ldet, qp_) q = utils.twotheta_to_q(two_theta, lambda_) if plot_: @@ -477,7 +506,6 @@ def get_circular_average( ax1.set_xlabel("q (" r"$\AA^{-1}$)") ax1.set_ylabel("I(q)") title = ax1.set_title("uid= %s--Circular Average" % uid) - ax2 = None if "xlim" in kwargs.keys(): ax1.set_xlim(kwargs["xlim"]) x1, x2 = kwargs["xlim"] @@ -523,7 +551,6 @@ def plot_circular_average( ax1.set_xlabel("q (" r"$\AA^{-1}$)") ax1.set_ylabel("I(q)") title = ax1.set_title("%s_Circular Average" % uid) - ax2 = None if "xlim" in kwargs.keys(): xlim = kwargs["xlim"] else: @@ -1154,7 +1181,7 @@ def get_angular_mask( """ mask: 2D-array inner_angle # the starting angle in unit of degree - outer_angle # the ending angle in unit of degree + outer_angle # the ending angle in unit of degree width # width of each angle, in degree, default is None, there is no gap between the neighbour angle ROI edges: default, None. otherwise, give a customized angle edges num_angles # number of angles @@ -1240,7 +1267,7 @@ def get_angular_mask_old( """ mask: 2D-array inner_angle # the starting angle in unit of degree - outer_angle # the ending angle in unit of degree + outer_angle # the ending angle in unit of degree width # width of each angle, in degree, default is None, there is no gap between the neighbour angle ROI edges: default, None. 
otherwise, give a customized angle edges num_angles # number of angles @@ -1338,7 +1365,7 @@ def get_ring_mask( return_q_in_pixel=False, ): # def get_ring_mask( mask, inner_radius= 0.0020, outer_radius = 0.009, width = 0.0002, num_rings = 12, - # edges=None, unit='pixel',pargs=None ): + # edges=None, unit='pixel',pargs=None ): """ mask: 2D-array inner_radius #radius of the first ring @@ -1366,11 +1393,11 @@ def get_ring_mask( # qc = np.int_( np.linspace( inner_radius,outer_radius, num_rings ) ) # edges = np.zeros( [ len(qc), 2] ) # if width%2: - # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 +1 + # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 +1 # else: - # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 + # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 - # find the edges of the required rings + # find the edges of the required rings if edges is None: if num_rings != 1: spacing = (outer_radius - inner_radius - num_rings * width) / (num_rings - 1) # spacing between rings @@ -1408,7 +1435,7 @@ def get_ring_mask( def get_ring_anglar_mask(ring_mask, ang_mask, q_ring_center, ang_center): """get ring_anglar mask""" - ring_max = ring_mask.max() + ring_mask.max() ang_mask_ = np.zeros(ang_mask.shape) ind = np.where(ang_mask != 0) @@ -1700,9 +1727,9 @@ def plot_saxs_rad_ang_g2(g2, taus, res_pargs=None, master_angle_plot=False, retu # title_qa = '%.2f'%( ang_center[sn]) + r'$^\circ$' + '( %d )'%(i) # if num_qr==1: - # title = 'uid= %s:--->'%uid + title_qr + '__' + title_qa + # title = 'uid= %s:--->'%uid + title_qr + '__' + title_qa # else: - # title = title_qa + # title = title_qa title = title_qa ax.set_title(title, y=1.1, fontsize=12) y = g2[:, i] @@ -1730,9 +1757,9 @@ def plot_saxs_rad_ang_g2(g2, taus, res_pargs=None, master_angle_plot=False, retu return fig -############################################ -##a good func to fit g2 for all types of geogmetries -############################################ +# +# a good func to fit g2 for all types of geogmetries +# def fit_saxs_rad_ang_g2( @@ -1756,8 +1783,8 @@ def fit_saxs_rad_ang_g2( 'streched_exponential': fit by a streched exponential function, defined as beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline - #fit_vibration: - # if True, will fit the g2 by a dumped sin function due to beamline mechnical oscillation + # fit_vibration: + # if True, will fit the g2 by a dumped sin function due to beamline mechnical oscillation Returns ------- @@ -1801,14 +1828,14 @@ def fit_saxs_rad_ang_g2( print("Please give ang_center") num_rings = g2.shape[1] - beta = np.zeros(num_rings) # contrast factor - rate = np.zeros(num_rings) # relaxation rate - alpha = np.zeros(num_rings) # alpha - baseline = np.zeros(num_rings) # baseline + beta = np.zeros(num_rings) # contrast factor + rate = np.zeros(num_rings) # relaxation rate + alpha = np.zeros(num_rings) # alpha + baseline = np.zeros(num_rings) # baseline freq = np.zeros(num_rings) if function == "flow_para_function" or function == "flow_para": - flow = np.zeros(num_rings) # baseline + flow = np.zeros(num_rings) # baseline if "fit_variables" in kwargs: additional_var = kwargs["fit_variables"] _vars = [k for k in list(additional_var.keys()) if additional_var[k] is False] @@ -2116,7 +2143,7 @@ def multi_uids_saxs_flow_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector, reverse=True) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2125,8 +2152,8 @@ def multi_uids_saxs_flow_xpcs_analysis( i += 1 if 
imgs != 0: - imgsa = apply_mask(imgs, mask) - Nimg = len(imgs) + apply_mask(imgs, mask) + len(imgs) md_ = imgs.md useful_uids[run_seq + 1][i] = uid g2s[run_seq + 1][i] = {} @@ -2166,18 +2193,18 @@ def multi_uids_saxs_flow_xpcs_analysis( # md['sample']= 'PS205000-PMMA-207000-SMMA3' print(md["Measurement"]) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms Ldet = md["detector_distance"] * 1000 # detector to sample distance (mm) - exposuretime = md["count_time"] + md["count_time"] acquisition_period = md["frame_time"] timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... center = md["center"] setup_pargs = dict( @@ -2192,7 +2219,7 @@ def multi_uids_saxs_flow_xpcs_analysis( md["avg_img"] = avg_img # plot1D( y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])], - # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) + # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) min_inten = 10 # good_start = np.where( np.array(imgsum) > min_inten )[0][0] @@ -2209,7 +2236,7 @@ def multi_uids_saxs_flow_xpcs_analysis( print("The good_end frame number is: %s " % good_end_) norm = None - ################### + # # Do correlaton here for nconf, seg_mask in enumerate([seg_mask_v, seg_mask_p]): @@ -2436,7 +2463,7 @@ def multi_uids_saxs_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector, reverse=True) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2446,7 +2473,7 @@ def multi_uids_saxs_xpcs_analysis( i += 1 if imgs != 0: imgsa = apply_mask(imgs, mask) - Nimg = len(imgs) + len(imgs) md_ = imgs.md useful_uids[run_seq + 1][i] = uid if compress: @@ -2485,18 +2512,18 @@ def multi_uids_saxs_xpcs_analysis( # md['sample']= 'PS205000-PMMA-207000-SMMA3' print(md["Measurement"]) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms Ldet = md["detector_distance"] * 1000 # detector to sample distance (mm) - exposuretime = md["count_time"] + md["count_time"] acquisition_period = md["frame_time"] timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... 
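# The geometry collected into setup_pargs here (dpix, Ldet, lambda_) fixes the
# standard pixel-to-q conversion used throughout; a minimal sketch of that relation
# (not a pyCHX call; utils.radius_to_twotheta/twotheta_to_q do the same job):
def pixel_to_q(r_pixels, dpix_mm, Ldet_mm, wavelength_A):
    import numpy as np
    two_theta = np.arctan(r_pixels * dpix_mm / Ldet_mm)  # scattering angle 2*theta
    return 4 * np.pi * np.sin(two_theta / 2.0) / wavelength_A  # q in 1/Angstrom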
center = md["center"] setup_pargs = dict( @@ -2511,7 +2538,7 @@ def multi_uids_saxs_xpcs_analysis( md["avg_img"] = avg_img # plot1D( y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])], - # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) + # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) min_inten = 10 # good_start = np.where( np.array(imgsum) > min_inten )[0][0] @@ -2646,7 +2673,7 @@ def plot_mul_g2(g2s, md): i = 0 for sub_seq in range(0, sub_num): # print( run_seq, sub_seq ) - uid = useful_uids[run_seq + 1][sub_seq + 1] + useful_uids[run_seq + 1][sub_seq + 1] sid = sids[i] if i == 0: title = r"$Q_r= $" + "%.5f " % (q_ring_center[sn]) + r"$\AA^{-1}$" @@ -2658,7 +2685,7 @@ def plot_mul_g2(g2s, md): # print ( len_tau, len(y)) # ax.semilogx(taus[1:len_], y[1:len_], marker = '%s'%next(markers_), color='%s'%next(colors_), - # markersize=6, label = '%s'%sid) + # markersize=6, label = '%s'%sid) ax.semilogx( taus[1:len_], y[1:len_], marker=markers[i], color=colors[i], markersize=6, label="%s" % sid diff --git a/pyCHX/XPCS_XSVS_SAXS_Multi_2017_V4.py b/pyCHX/XPCS_XSVS_SAXS_Multi_2017_V4.py index 062db0d..3d8a7d1 100644 --- a/pyCHX/XPCS_XSVS_SAXS_Multi_2017_V4.py +++ b/pyCHX/XPCS_XSVS_SAXS_Multi_2017_V4.py @@ -1,8 +1,67 @@ # python XPCS_XSVS_SAXS_Multi_2017_V4.py -from pyCHX.chx_packages import * -from pyCHX.chx_xpcs_xsvs_jupyter import run_xpcs_xsvs_single +from pyCHX.chx_packages import ( + Attachment, + center, + data_dir, + data_dir_average, + export_xpcs_results_to_h5, + extract_xpcs_results_from_h5, + find_uids, + get_averaged_data_from_multi_res, + get_contrast, + get_g2_fit_general, + get_his_std_from_pds, + get_q_rate_fit_general, + get_xsvs_fit, + getpass, + guids, + imgsa, + inc_x0, + load_mask, + make_pdf_report, + np, + os, + pdf_version, + pixel_mask, + plot1D, + plot_circular_average, + plot_each_ring_mean_intensityc, + plot_g2_contrast, + plot_g2_general, + plot_q_rate_fit_general, + plot_qIq_with_ROI, + plot_qr_1d_with_ROI, + plot_t_iqc, + plot_waterfallc, + plot_xsvs_fit, + psave_obj, + q_ring_center, + qr, + qr_1d_pds_label, + qth_interest, + qval_dict, + roi_mask, + run_time, + save_arrays, + save_dict_csv, + save_g2_fit_para_tocsv, + save_g2_general, + save_KM, + setup_pargs, + show_C12, + show_img, + show_qzr_roi, + show_ROI_on_image, + show_saxs_qmap, + ticks, + time, + timeperframe, + trans_data_to_pd, + uidstr, + update_olog_uid, +) def XPCS_XSVS_SAXS_Multi( @@ -13,8 +72,8 @@ def XPCS_XSVS_SAXS_Multi( uid_average="Au50_7p5PEGX1_vs_slow_120116", ): scat_geometry = run_pargs["scat_geometry"] - force_compress = run_pargs["force_compress"] - para_compress = run_pargs["para_compress"] + run_pargs["force_compress"] + run_pargs["para_compress"] run_fit_form = run_pargs["run_fit_form"] run_waterfall = run_pargs["run_waterfall"] run_t_ROI_Inten = run_pargs["run_t_ROI_Inten"] @@ -24,24 +83,24 @@ def XPCS_XSVS_SAXS_Multi( run_two_time = run_pargs["run_two_time"] run_four_time = run_pargs["run_four_time"] run_xsvs = run_pargs["run_xsvs"] - ############################################################### + # if scat_geometry != "saxs": # to be done for other types run_xsvs = False - ############################################################### + # att_pdf_report = run_pargs["att_pdf_report"] - show_plot = run_pargs["show_plot"] - CYCLE = run_pargs["CYCLE"] + run_pargs["show_plot"] + run_pargs["CYCLE"] mask_path = run_pargs["mask_path"] mask_name = run_pargs["mask_name"] 
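Later hunks in this patch (see chx_generic_functions.py below) systematically rewrite comparisons with None, turning "x == None" into "x is None" and "x != None" into "x is not None" (flake8 E711). In NumPy-heavy code this is more than style: == on an ndarray broadcasts element-wise, and the resulting boolean array cannot be used directly in an if test. A minimal sketch, with a hypothetical masked_mean helper that is not part of pyCHX:

    import numpy as np

    def masked_mean(a, mask=None):
        # "a == None" would broadcast to an element-wise comparison whose
        # truth value is ambiguous inside "if"; "is None" tests identity.
        if mask is None:
            mask = np.zeros(a.shape, dtype=bool)
        return a[~mask].mean()

    print(masked_mean(np.array([1.0, 2.0, 3.0])))  # -> 2.0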
good_start = run_pargs["good_start"] - use_imgsum_norm = run_pargs["use_imgsum_norm"] + run_pargs["use_imgsum_norm"] mask = load_mask(mask_path, mask_name, plot_=False, image_name="%s_mask" % mask_name, reverse=True) # mask *= pixel_mask mask[:, 2069] = 0 # False #Concluded from the previous results # np.save( data_dir + 'mask', mask) show_img(mask, image_name="%s_mask" % uid_average, save=True, path=data_dir) - mask_load = mask.copy() + mask.copy() username = getpass.getuser() data_dir0 = os.path.join("/XF11ID/analysis/", run_pargs["CYCLE"], username, "Results/") @@ -58,9 +117,7 @@ def XPCS_XSVS_SAXS_Multi( print(uids) uid = uids[0] - data_dir_ = data_dir - uid_ = uid_average - ### For Load results + # For Load results multi_res = {} for uid, fuid in zip(guids, fuids): @@ -485,7 +542,7 @@ def XPCS_XSVS_SAXS_Multi( export_xpcs_results_to_h5(uid + "_Res.h5", data_dir, export_dict=Exdt) # extract_dict = extract_xpcs_results_from_h5( filename = uid + '_Res.h5', import_dir = data_dir ) - ## Create PDF report for each uid + # Create PDF report for each uid pdf_out_dir = data_dir pdf_filename = "XPCS_Analysis_Report_for_%s%s.pdf" % (uid_average, pdf_version) if run_xsvs: @@ -505,7 +562,7 @@ def XPCS_XSVS_SAXS_Multi( run_xsvs, report_type=scat_geometry, ) - ### Attach each g2 result to the corresponding olog entry + # Attach each g2 result to the corresponding olog entry if att_pdf_report: os.environ["HTTPS_PROXY"] = "https://proxy:8888" os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" @@ -513,7 +570,7 @@ def XPCS_XSVS_SAXS_Multi( atch = [Attachment(open(pname, "rb"))] try: update_olog_uid(uid=fuids[-1], text="Add XPCS Averaged Analysis PDF Report", attachments=atch) - except: + except Exception: print( "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." 
% pname @@ -554,14 +611,14 @@ def XPCS_XSVS_SAXS_Multi( mask_path="/XF11ID/analysis/2016_3/masks/", mask_name="Nov28_4M_SAXS_mask.npy", good_start=5, - #####################################for saxs + # for saxs uniformq=True, inner_radius=0.005, # 0.005 for 50 nmAu/SiO2, 0.006, #for 10nm/coralpor outer_radius=0.04, # 0.04 for 50 nmAu/SiO2, 0.05, #for 10nm/coralpor num_rings=12, gap_ring_number=6, number_rings=1, - ############################for gi_saxs + # for gi_saxs # inc_x0 = 1473, # inc_y0 = 372, # refl_x0 = 1473, diff --git a/pyCHX/chx_Fitters2D.py b/pyCHX/chx_Fitters2D.py index a2f27ab..5af14ec 100644 --- a/pyCHX/chx_Fitters2D.py +++ b/pyCHX/chx_Fitters2D.py @@ -195,11 +195,11 @@ def __call__(self, XY, img, **kwargs): self.mod = Model(self.fitfunc, independent_vars=["XY"], param_names=self.params.keys()) # assumes first var is dependent var res = self.mod.fit(img.ravel(), XY=(XY[0].ravel(), XY[1].ravel()), params=params, **kwargs) - ## old version, only return values + # old version, only return values # add reduced chisq to parameter list # res.best_values['chisq']=res.redchi # return res.best_values - ## new version, also return the std + # new version, also return the std resf = {} ks = list(res.params.keys()) for var in ks: diff --git a/pyCHX/chx_compress.py b/pyCHX/chx_compress.py index 16e9881..88743d5 100644 --- a/pyCHX/chx_compress.py +++ b/pyCHX/chx_compress.py @@ -1,11 +1,8 @@ -import gc import os import pickle as pkl import shutil import struct import sys -from contextlib import closing -from glob import iglob from multiprocessing import Pool import dill @@ -14,7 +11,7 @@ # imports handler from CHX # this is where the decision is made whether or not to use dask # from chxtools.handlers import EigerImages, EigerHandler -from eiger_io.fs_handler import EigerHandler, EigerImages +from eiger_io.fs_handler import EigerImages from tqdm import tqdm from pyCHX.chx_generic_functions import ( @@ -28,7 +25,7 @@ reverse_updown, rot90_clockwise, ) -from pyCHX.chx_libs import RUN_GUI, LogNorm, datetime, db, getpass, np, os, roi, time +from pyCHX.chx_libs import RUN_GUI, LogNorm, db, np, os, roi, time def run_dill_encoded(what): @@ -48,8 +45,7 @@ def pass_FD(FD, n): # FD.rdframe(n) try: FD.seekimg(n) - except: - pass + except Exception: return False @@ -244,7 +240,7 @@ def read_compressed_eigerdata( else: try: mask, avg_img, imgsum, bad_frame_list_ = pkl.load(open(filename + ".pkl", "rb")) - except: + except Exception: CAL = True if CAL: FD = Multifile(filename, beg, end) @@ -376,8 +372,7 @@ def para_compress_eigerdata( print("No bad frames are involved.") print("Combining the seperated compressed files together...") combine_compressed(filename, Nf, del_old=True) - del results - del res_ + if with_pickle: pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) if copy_rawdata: @@ -463,7 +458,7 @@ def para_segment_compress_eigerdata( inputs = range(num_max_para_process * nr, Nf) else: inputs = range(num_max_para_process * nr, num_max_para_process * (nr + 1)) - fns = [filename + "_temp-%i.tmp" % i for i in inputs] + [filename + "_temp-%i.tmp" % i for i in inputs] # print( nr, inputs, ) pool = Pool(processes=len(inputs)) # , maxtasksperchild=1000 ) # print( inputs ) @@ -536,7 +531,7 @@ def segment_compress_eigerdata( Nimg_ = len(images) M, N = images[0].shape avg_img = np.zeros([M, N], dtype=np.float64) - Nopix = float(avg_img.size) + float(avg_img.size) n = 0 good_count = 0 # frac = 0.0 @@ -583,14 +578,14 @@ def segment_compress_eigerdata( 
fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) else: fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) # n +=1 - del p, v, img + fp.flush() fp.close() avg_img /= good_count bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) sys.stdout.write("#") sys.stdout.flush() - # del images, mask, avg_img, imgsum, bad_frame_list + # # print( 'Should release memory here') return mask, avg_img, imgsum, bad_frame_list @@ -904,7 +899,7 @@ def __init__(self, filename, beg, end, reverse=False): NOTE: At each record n, the file cursor points to record n+1 """ self.FID = open(filename, "rb") - # self.FID.seek(0,os.SEEK_SET) + # self.FID.seek(0,os.SEEK_SET) self.filename = filename # br: bytes read br = self.FID.read(1024) @@ -1378,8 +1373,6 @@ def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): for i in inputs: mean_intensity[:, i] = res[i] print("ROI mean_intensit calculation is DONE!") - del results - del res mean_intensity /= norm return mean_intensity, index diff --git a/pyCHX/chx_compress_analysis.py b/pyCHX/chx_compress_analysis.py index 102ddfa..7182883 100644 --- a/pyCHX/chx_compress_analysis.py +++ b/pyCHX/chx_compress_analysis.py @@ -1,50 +1,21 @@ from __future__ import absolute_import, division, print_function import logging -import os -import struct -from collections import namedtuple import matplotlib.pyplot as plt -from skbeam.core.roi import extract_label_indices -from skbeam.core.utils import multi_tau_lags from tqdm import tqdm from pyCHX.chx_generic_functions import save_arrays # from pyCHX.chx_generic_functions import (get_circular_average) # from pyCHX.XPCS_SAXS import (get_circular_average) -from pyCHX.chx_libs import ( - RUN_GUI, - Figure, - LogNorm, - colors, - colors_, - datetime, - db, - getpass, - markers, - markers_, - np, - os, - roi, - time, -) +from pyCHX.chx_libs import RUN_GUI, Figure, colors, markers, np, roi logger = logging.getLogger(__name__) from modest_image import imshow -from pyCHX.chx_compress import ( - Multifile, - compress_eigerdata, - get_avg_imgc, - get_each_frame_intensityc, - init_compress_eigerdata, - mean_intensityc, - pass_FD, - read_compressed_eigerdata, -) +from pyCHX.chx_compress import get_avg_imgc, mean_intensityc from pyCHX.chx_generic_functions import find_bad_pixels_FD # from pyCHX.chx_compress import * @@ -97,8 +68,7 @@ def get_time_edge_avg_img(FD, frame_edge, show_progress=True, apply_threshold=Fa def plot_imgs(imgs, image_name=None, *argv, **kwargs): # NOT WORKing NOW.... N = len(imgs) - sx = np.ceil(np.sqrt(N)) - pass + np.ceil(np.sqrt(N)) def cal_waterfallc( diff --git a/pyCHX/chx_correlation.py b/pyCHX/chx_correlation.py index d636ae7..3ed70b7 100644 --- a/pyCHX/chx_correlation.py +++ b/pyCHX/chx_correlation.py @@ -1,27 +1,27 @@ -# ###################################################################### +# # # Developed at the NSLS-II, Brookhaven National Laboratory # -# # +# # # Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # # National Laboratory. All rights reserved. # -# # +# # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # -# # +# # # * Redistributions of source code must retain the above copyright # -# notice, this list of conditions and the following disclaimer. # -# # +# notice, this list of conditions and the following disclaimer. 
# +# # * Redistributions in binary form must reproduce the above copyright # -# notice this list of conditions and the following disclaimer in # -# the documentation and/or other materials provided with the # -# distribution. # -# # +# notice this list of conditions and the following disclaimer in # +# the documentation and/or other materials provided with the # +# distribution. # +# # # * Neither the name of the Brookhaven Science Associates, Brookhaven # -# National Laboratory nor the names of its contributors may be used # -# to endorse or promote products derived from this software without # -# specific prior written permission. # -# # +# National Laboratory nor the names of its contributors may be used # +# to endorse or promote products derived from this software without # +# specific prior written permission. # +# # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # @@ -34,7 +34,7 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. # -######################################################################## +# """ This module is for functions specific to time correlation @@ -131,7 +131,7 @@ def _one_time_process( future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] @@ -406,7 +406,7 @@ def multi_tau_auto_corr(num_levels, num_bufs, labels, images): def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): """ - This model will provide normalized intensity-intensity time + This model will provide normalized intensity-intensity time correlation data to be minimized.
Parameters ---------- @@ -662,7 +662,7 @@ def _two_time_process( img_per_level[level] += 1 # in multi-tau correlation other than first level all other levels - # have to do the half of the correlation + # have to do the half of the correlation if level == 0: i_min = 0 else: @@ -674,7 +674,7 @@ past_img = buf[level, delay_no] future_img = buf[level, buf_no] - # get the matrix of correlation function without normalizations + # get the matrix of correlation function without normalizations tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] # get the matrix of past intensity normalizations pi_binned = np.bincount(label_array, weights=past_img)[1:] @@ -882,7 +882,7 @@ class CrossCorrelator: >> cimg = cc(img1) or, mask may may be ids >> cc = CrossCorrelator(ids) - #(where ids is same shape as img1) + # (where ids is same shape as img1) >> cc1 = cc(img1) >> cc12 = cc(img1, img2) # if img2 shifts right of img1, point of maximum correlation is shifted @@ -957,7 +957,7 @@ def __init__(self, shape, mask=None, normalization=None, wrap=False): self.nids = len(self.ids) self.maskcorrs = list() - # regions where the correlations are not zero + self.pxlst_maskcorrs = list() # basically saving bunch of mask related stuff like indexing etc, just diff --git a/pyCHX/chx_correlationc.py b/pyCHX/chx_correlationc.py index 02bc754..52f0c39 100644 --- a/pyCHX/chx_correlationc.py +++ b/pyCHX/chx_correlationc.py @@ -86,7 +86,7 @@ def _one_time_process( past_img = buf[level, delay_no] future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] # take out the past_ing and future_img created using bad images @@ -176,7 +176,7 @@ def _one_time_process_error( past_img = buf[level, delay_no] future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] # take out the past_ing and future_img created using bad images @@ -186,13 +186,13 @@ else: # for w, arr in zip([past_img*future_img, past_img, future_img], - # [G, past_intensity_norm, future_intensity_norm, - # ]): - # binned = np.bincount(label_array, weights=w)[1:] - # #nonz = np.where(w)[0] - # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] - # arr[t_index] += ((binned / num_pixels - - # arr[t_index]) / normalize) + # [G, past_intensity_norm, future_intensity_norm, + # ]): + # binned = np.bincount(label_array, weights=w)[1:] + # #nonz = np.where(w)[0] + # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + # arr[t_index] += ((binned / num_pixels - + # arr[t_index]) / normalize) for w, arr in zip( [past_img * future_img, past_img, future_img], [ @@ -867,7 +867,7 @@ def lazy_one_time_debug( def auto_corr_scat_factor(lags, beta, relaxation_rate, baseline=1): """ - This model will provide normalized intensity-intensity time + This model will provide normalized intensity-intensity time correlation data to be minimized.
Parameters ---------- @@ -968,7 +968,7 @@ def lazy_two_time( ): # def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, - # two_time_internal_state=None): + # two_time_internal_state=None): """Generator implementation of two-time correlation If you do not want multi-tau correlation, set num_levels to 1 and num_bufs to the number of images you wish to correlate @@ -1179,7 +1179,7 @@ def _two_time_process( img_per_level[level] += 1 # in multi-tau correlation other than first level all other levels - # have to do the half of the correlation + # have to do the half of the correlation if level == 0: i_min = 0 else: @@ -1193,7 +1193,7 @@ def _two_time_process( # print( np.sum( past_img ), np.sum( future_img )) - # get the matrix of correlation function without normalizations + # get the matrix of correlation function without normalizations tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] # get the matrix of past intensity normalizations pi_binned = np.bincount(label_array, weights=past_img)[1:] @@ -1394,7 +1394,7 @@ def cal_g2c( g_max = min(g_max1, g_max2) # print(g_max) # g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] * - # s.future_intensity[:g_max])) + # s.future_intensity[:g_max])) g2[:g_max, qi - 1] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max]) g2_err[:g_max, qi - 1] = np.sqrt( (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2 @@ -1453,9 +1453,9 @@ def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, nor if end is None: self.end = FD.end # if self.beg ==0: - # self.length = self.end - self.beg + # self.length = self.end - self.beg # else: - # self.length = self.end - self.beg + 1 + # self.length = self.end - self.beg + 1 self.length = self.end - self.beg @@ -1504,18 +1504,18 @@ def get_data(self): if self.mean_int_sets is not None: # for each frame will normalize each ROI by it's averaged value for j in range(noqs): # if i ==100: - # if j==0: - # print( self.mean_int_sets[i][j] ) - # print( qind_[ noprs[j]: noprs[j+1] ] ) + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] # if i==100: - # print( i, Mean_Int_Qind[ self.qind== 11 ]) + # print( i, Mean_Int_Qind[ self.qind== 11 ]) # print('Do norm_mean_int here') # if i ==10: - # print( norm_Mean_Int_Qind ) + # print( norm_Mean_Int_Qind ) else: norm_Mean_Int_Qind = 1.0 if self.imgsum is not None: @@ -1529,7 +1529,7 @@ def get_data(self): norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi # if i==100: - # print(norm_Mean_Int_Qind[:100]) + # print(norm_Mean_Int_Qind[:100]) data_array[n][pxlist] = v[w] / norms n += 1 @@ -1562,9 +1562,9 @@ def __init__(self, FD, pixelist, beg=None, end=None, norm=None, imgsum=None, mea if end is None: self.end = FD.end # if self.beg ==0: - # self.length = self.end - self.beg + # self.length = self.end - self.beg # else: - # self.length = self.end - self.beg + 1 + # self.length = self.end - self.beg + 1 self.length = self.end - self.beg @@ -1608,18 +1608,18 @@ def get_data(self): if self.mean_int_sets is not None: # for normalization of each averaged ROI of each frame for j in range(noqs): # if i ==100: - # if j==0: - # print( self.mean_int_sets[i][j] ) - # print( qind_[ noprs[j]: noprs[j+1] ] ) + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) Mean_Int_Qind[qind_[noprs[j] : noprs[j + 
1]]] = self.mean_int_sets[i][j] norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] # if i==100: - # print( i, Mean_Int_Qind[ self.qind== 11 ]) + # print( i, Mean_Int_Qind[ self.qind== 11 ]) # print('Do norm_mean_int here') # if i ==10: - # print( norm_Mean_Int_Qind ) + # print( norm_Mean_Int_Qind ) else: norm_Mean_Int_Qind = 1.0 if self.imgsum is not None: @@ -1638,7 +1638,7 @@ def get_data(self): norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi # if i==100: - # print(norm_Mean_Int_Qind[:100]) + # print(norm_Mean_Int_Qind[:100]) data_array[n][pxlist] = v[w] / norms n += 1 @@ -1686,7 +1686,7 @@ def auto_two_Arrayc(data_pixel, rois, index=None): try: g12b = np.zeros([noframes, noframes, len(qlist)]) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1753,7 +1753,7 @@ def auto_two_Arrayc_ExplicitNorm(data_pixel, rois, norm=None, index=None): try: g12b = np.zeros([noframes, noframes, len(qlist)]) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1818,7 +1818,7 @@ def two_time_norm(data_pixel, rois, index=None): try: norm = np.zeros(len(qlist)) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1860,7 +1860,7 @@ def check_normalization(frame_num, q_list, imgsa, data_pixel): plot1D(raw_data, ax=ax[0], legend="q=%s" % (q), m=markers[n], title="fra=%s_raw_data" % (frame_num)) # plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n], - # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) # print( mean_int_sets_[frame_num][q-1] ) plot1D( norm_data, diff --git a/pyCHX/chx_correlationp.py b/pyCHX/chx_correlationp.py index 496ec67..bf589c4 100644 --- a/pyCHX/chx_correlationp.py +++ b/pyCHX/chx_correlationp.py @@ -7,21 +7,17 @@ from __future__ import absolute_import, division, print_function import logging -import sys -from collections import namedtuple from multiprocessing import Pool -import dill import numpy as np import skbeam.core.roi as roi from skbeam.core.roi import extract_label_indices -from skbeam.core.utils import multi_tau_lags -from pyCHX.chx_compress import apply_async, go_through_FD, map_async, pass_FD, run_dill_encoded +from pyCHX.chx_compress import apply_async, pass_FD from pyCHX.chx_correlationc import _one_time_process as _one_time_processp from pyCHX.chx_correlationc import _one_time_process_error as _one_time_process_errorp from pyCHX.chx_correlationc import _two_time_process as _two_time_processp -from pyCHX.chx_correlationc import _validate_and_transform_inputs, get_pixelist_interp_iq +from pyCHX.chx_correlationc import _validate_and_transform_inputs from pyCHX.chx_libs import tqdm logger = logging.getLogger(__name__) @@ -344,8 +340,7 @@ def cal_c12p(FD, ring_mask, bad_frame_list=None, good_start=0, num_buf=8, num_le lag_steps = res[0][1] print("G2 calculation DONE!") - del results - del res + return c12, lag_steps[lag_steps < noframes] @@ -598,7 +593,7 @@ def lazy_one_timep( g_max = min(g_max1, g_max2) g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) # sys.stdout.write('#') - # del FD + # # sys.stdout.flush() # print (g2) # return results(g2, s.lag_steps[:g_max], s) @@ -677,9 +672,9 @@ def cal_g2p( res = 
[results[k].get() for k in tqdm(list(sorted(results.keys())))] len_lag = 10**10 for i in inputs: # to get the smallest length of lag_step, - ##***************************** - ##Here could result in problem for significantly cut useful data if some Q have very short tau list - ##**************************** + # ***************************** + # Here could result in problem for significantly cut useful data if some Q have very short tau list + # **************************** if len_lag > len(res[i][1]): lag_steps = res[i][1] len_lag = len(lag_steps) @@ -731,8 +726,6 @@ def cal_g2p( if len(lag_steps_err) < len(lag_stepsi): lag_steps_err = lag_stepsi - del results - del res if cal_error: print("G2 with error bar calculation DONE!") return g2[:Gmax, :], lag_steps_err[:Gmax], g2_err[:Gmax, :] / np.sqrt(nopr) @@ -810,15 +803,13 @@ def cal_GPF( g2_G = np.zeros((int((num_lev + 1) * num_buf / 2), len(pixelist))) g2_P = np.zeros_like(g2_G) g2_F = np.zeros_like(g2_G) - Gmax = 0 - lag_steps_err = res[0][1] + res[0][1] # print('Here') for i in inputs: g2_G[:, qind == 1 + i] = res[i][2] # [:len_lag] g2_P[:, qind == 1 + i] = res[i][3] # [:len_lag] g2_F[:, qind == 1 + i] = res[i][4] # [:len_lag] - del results - del res + return g2_G, g2_P, g2_F @@ -840,12 +831,12 @@ def get_g2_from_ROI_GPF(G, P, F, roi_mask): g2 = np.zeros([G.shape[0], noqs]) g2_err = np.zeros([G.shape[0], noqs]) for i in range(1, 1 + noqs): - ## G[0].shape is the same as roi_mask shape + # G[0].shape is the same as roi_mask shape if len(G.shape) > 2: s_Gall_qi = G[:, roi_mask == i] s_Pall_qi = P[:, roi_mask == i] s_Fall_qi = F[:, roi_mask == i] - ## G[0].shape is the same length as pixelist + # G[0].shape is the same length as pixelist else: s_Gall_qi = G[:, qind == i] s_Pall_qi = P[:, qind == i] @@ -928,7 +919,7 @@ def auto_two_Arrayp(data_pixel, rois, index=None): # pool = Pool(processes= len(inputs) ) # results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i], - # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] + # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] # res = [r.get() for r in results] pool = Pool(processes=len(inputs)) diff --git a/pyCHX/chx_correlationp2.py b/pyCHX/chx_correlationp2.py index 8ddbc19..6ec77c3 100644 --- a/pyCHX/chx_correlationp2.py +++ b/pyCHX/chx_correlationp2.py @@ -9,21 +9,17 @@ from __future__ import absolute_import, division, print_function import logging -import sys -from collections import namedtuple from multiprocessing import Pool -import dill import numpy as np import skbeam.core.roi as roi from skbeam.core.roi import extract_label_indices -from skbeam.core.utils import multi_tau_lags -from pyCHX.chx_compress import apply_async, go_through_FD, map_async, pass_FD, run_dill_encoded +from pyCHX.chx_compress import apply_async, pass_FD from pyCHX.chx_correlationc import _one_time_process as _one_time_processp from pyCHX.chx_correlationc import _one_time_process_error as _one_time_process_errorp from pyCHX.chx_correlationc import _two_time_process as _two_time_processp -from pyCHX.chx_correlationc import _validate_and_transform_inputs, get_pixelist_interp_iq +from pyCHX.chx_correlationc import _validate_and_transform_inputs from pyCHX.chx_libs import tqdm logger = logging.getLogger(__name__) @@ -334,8 +330,7 @@ def cal_c12p(FD, ring_mask, bad_frame_list=None, good_start=0, num_buf=8, num_le lag_steps = res[0][1] print("G2 calculation DONE!") - del results - del res + return c12, lag_steps[lag_steps < noframes] @@ -580,7 +575,7 @@ def lazy_one_timep( g_max = 
min(g_max1, g_max2) g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) # sys.stdout.write('#') - # del FD + # # sys.stdout.flush() # print (g2) # return results(g2, s.lag_steps[:g_max], s) @@ -654,9 +649,9 @@ def cal_g2p( res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] len_lag = 10**10 for i in inputs: # to get the smallest length of lag_step, - ##***************************** - ##Here could result in problem for significantly cut useful data if some Q have very short tau list - ##**************************** + # ***************************** + # Here could result in problem for significantly cut useful data if some Q have very short tau list + # **************************** if len_lag > len(res[i][1]): lag_steps = res[i][1] len_lag = len(lag_steps) @@ -715,8 +710,6 @@ def cal_g2p( g2_P[:, nopr_[i] : nopr_[i + 1]] = s_Pall_qi g2_F[:, nopr_[i] : nopr_[i + 1]] = s_Fall_qi - del results - del res if cal_error: print("G2 with error bar calculation DONE!") return g2[:Gmax, :], lag_steps_err[:Gmax], g2_err[:Gmax, :] / np.sqrt(nopr), g2_G, g2_P, g2_F @@ -775,7 +768,7 @@ def auto_two_Arrayp(data_pixel, rois, index=None): # pool = Pool(processes= len(inputs) ) # results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i], - # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] + # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] # res = [r.get() for r in results] pool = Pool(processes=len(inputs)) diff --git a/pyCHX/chx_crosscor.py b/pyCHX/chx_crosscor.py index 738be4e..7c1d860 100644 --- a/pyCHX/chx_crosscor.py +++ b/pyCHX/chx_crosscor.py @@ -1,22 +1,18 @@ # Develop new version # Original from #/XF11ID/analysis/Analysis_Pipelines/Develop/chxanalys/chxanalys/chx_correlation.py -# ###################################################################### +# # # Let's change from mask's to indices -######################################################################## +# """ This module is for functions specific to spatial correlation in order to tackle the motion of speckles """ from __future__ import absolute_import, division, print_function -from collections import namedtuple - import numpy as np from scipy.signal import fftconvolve -from skbeam.core.roi import extract_label_indices # from __future__ import absolute_import, division, print_function -from skbeam.core.utils import multi_tau_lags # for a convenient status bar try: @@ -57,7 +53,7 @@ def direct_corss_cor(im1, im2): elif j < 0: d1 = im1[:j, :] d2 = im2[-j:, :] - else: ##j>0 + else: # j>0 d1 = im1[j:, :] d2 = im2[:-j, :] elif i < 0: @@ -67,7 +63,7 @@ def direct_corss_cor(im1, im2): elif j < 0: d1 = im1[:j, :i] d2 = im2[-j:, -i:] - else: ##j>0 + else: # j>0 d1 = im1[j:, :i] d2 = im2[:-j, -i:] else: # i>0: @@ -77,7 +73,7 @@ def direct_corss_cor(im1, im2): elif j < 0: d1 = im1[:j, i:] d2 = im2[-j:, :-i] - else: ##j>0 + else: # j>0 d1 = im1[j:, i:] d2 = im2[:-j, :-i] # print(i,j) @@ -101,7 +97,7 @@ class CrossCorrelator2: >> cimg = cc(img1) or, mask may may be ids >> cc = CrossCorrelator(ids) - #(where ids is same shape as img1) + # (where ids is same shape as img1) >> cc1 = cc(img1) >> cc12 = cc(img1, img2) # if img2 shifts right of img1, point of maximum correlation is shifted @@ -254,7 +250,7 @@ def __call__(self, img1, img2=None, normalization=None, check_res=False): # ccorr = np.fft.fftshift(ccorr) ccorr = _centered(ccorr, self.sizes[reg, :]) else: - ndim = img1.ndim + img1.ndim tmpimg2 = np.zeros_like(tmpimg) tmpimg2[i, j] = img2[ii, jj] im2 = 
np.fft.rfftn(tmpimg2, fshape) # image 2 @@ -263,15 +259,15 @@ def __call__(self, img1, img2=None, normalization=None, check_res=False): ccorr = _centered(ccorr, self.sizes[reg, :]) # print('here') - ###check here + # check here if check_res: if reg == 0: self.norm = maskcor self.ck = ccorr.copy() - # print(ccorr.max()) + # print(ccorr.max()) self.tmp = tmpimg self.fs = fshape - ###end the check + # end the check # now handle the normalizations if "symavg" in normalization: @@ -307,10 +303,10 @@ def __call__(self, img1, img2=None, normalization=None, check_res=False): if check_res: if reg == 0: self.ckn = ccorr.copy() - # print('here') - # print( np.average(tmpimg[w]) ) - # print( maskcor[w] ) - # print( ccorr.max(), maskcor[w], np.average(tmpimg[w]), np.average(tmpimg2[w]) ) + # print('here') + # print( np.average(tmpimg[w]) ) + # print( maskcor[w] ) + # print( ccorr.max(), maskcor[w], np.average(tmpimg[w]), np.average(tmpimg2[w]) ) ccorrs.append(ccorr) if len(ccorrs) == 1: @@ -328,66 +324,22 @@ def _centered(img, sz): return img -##define a custmoized fftconvolve +# define a custmoized fftconvolve -######################################################################################## +# # modifided version from signaltools.py in scipy (Mark March 2017) # Author: Travis Oliphant # 1999 -- 2002 import threading -import warnings # from . import sigtools import numpy as np -from numpy import ( - allclose, - angle, - arange, - argsort, - array, - asarray, - atleast_1d, - atleast_2d, - cast, - dot, - exp, - expand_dims, - iscomplexobj, - isscalar, - mean, - ndarray, - newaxis, - ones, - pi, - poly, - polyadd, - polyder, - polydiv, - polymul, - polysub, - polyval, - prod, - product, - r_, - ravel, - real_if_close, - reshape, - roots, - sort, - sum, - take, - transpose, - unique, - where, - zeros, - zeros_like, -) +from numpy import array, asarray from numpy.fft import irfftn, rfftn from numpy.lib import NumpyVersion -from scipy import linalg -from scipy.fftpack import fft, fft2, fftfreq, fftn, ifft, ifft2, ifftn, ifftshift +from scipy.fftpack import fftn, ifftn # from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext @@ -570,7 +522,7 @@ class CrossCorrelator1: >> cimg = cc(img1) or, mask may may be ids >> cc = CrossCorrelator(ids) - #(where ids is same shape as img1) + # (where ids is same shape as img1) >> cc1 = cc(img1) >> cc12 = cc(img1, img2) # if img2 shifts right of img1, point of maximum correlation is shifted @@ -677,7 +629,7 @@ def __init__(self, shape, mask=None, normalization=None): maskcorr = _cross_corr1(submask) # quick fix for #if self.wrap is False: - # submask = _expand_image1(submask)finite numbers should be integer so + # submask = _expand_image1(submask)finite numbers should be integer so # choose some small value to threshold maskcorr *= maskcorr > 0.5 self.maskcorrs.append(maskcorr) @@ -772,12 +724,10 @@ def __call__(self, img1, img2=None, normalization=None, desc="cc"): return ccorrs -##for parallel +# for parallel from multiprocessing import Pool -import dill - -from pyCHX.chx_compress import apply_async, map_async +from pyCHX.chx_compress import apply_async def run_para_ccorr_sym(ccorr_sym, FD, nstart=0, nend=None, imgsum=None, img_norm=None): @@ -825,7 +775,4 @@ def run_para_ccorr_sym(ccorr_sym, FD, nstart=0, nend=None, imgsum=None, img_norm for i in range(Nc): cc[i] = cc[i] / N - del results - del res - return cc diff --git a/pyCHX/chx_generic_functions.py b/pyCHX/chx_generic_functions.py index fef6168..e71f8ac 100644 --- 
a/pyCHX/chx_generic_functions.py +++ b/pyCHX/chx_generic_functions.py @@ -1,23 +1,62 @@ import copy import datetime from os import listdir -from shutil import copyfile import matplotlib.cm as mcm import numpy as np import PIL import pytz import scipy -from matplotlib import cm from modest_image import imshow from scipy.special import erf -from skbeam.core.utils import angle_grid, radial_grid, radius_to_twotheta, twotheta_to_q -from skimage.draw import disk, ellipse, line, line_aa, polygon +from skbeam.core.utils import angle_grid, radial_grid, radius_to_twotheta +from skimage.draw import disk, ellipse, polygon from skimage.filters import prewitt # from tqdm import * -from pyCHX.chx_libs import * -from pyCHX.chx_libs import colors, markers +from pyCHX.chx_libs import ( + RUN_GUI, + DataFrame, + Figure, + Image, + LogNorm, + Model, + _mask_path_, + center, + cmap_albula, + cmap_vge_hdr, + colors, + colors_, + combine_two_roi_mask, + corr, + db, + figsize, + get_angular_mask, + get_avg_imgc, + get_circular_average, + get_ring_mask, + getpass, + h5py, + json, + markers, + markers_, + md, + mpl_plot, + multi_tau_lags, + os, + pickle, + pims, + plt, + power, + roi, + scale_rgb, + time, + tqdm, + uid, + uids, + utils, + warnings, +) markers = [ "o", @@ -47,7 +86,10 @@ markers = np.array(markers * 100) -flatten_nestlist = lambda l: [item for sublist in l for item in sublist] +def flatten_nestlist(l): + return [item for sublist in l for item in sublist] + + """a function to flatten a nest list e.g., flatten( [ ['sg','tt'],'ll' ] ) gives ['sg', 'tt', 'l', 'l'] @@ -111,7 +153,7 @@ def fit_one_peak_curve(x, y, fit_range=None): peak = LorentzianModel() background = LinearModel() model = peak + background - if fit_range != None: + if fit_range is not None: x1, x2 = fit_range xf = x[x1:x2] yf = y[x1:x2] @@ -164,7 +206,7 @@ def plot_xy_with_fit( # txts = r'$\beta$' + r'$ = %.3f$'%(beta[i]) + r'$ s^{-1}$' ax.text(x=0.02, y=0.1, s=txts, fontsize=14, transform=ax.transAxes) plt.tight_layout() - if filename != None: + if filename is not None: plt.savefig(filename) return ax @@ -379,7 +421,7 @@ def get_SG_norm(FD, pixelist, bins=1, mask=None, window_size=11, order=5): Return: norm: shape as ( length of FD, length of pixelist ) """ - if mask == None: + if mask is None: mask = 1 beg = FD.beg end = FD.end @@ -459,7 +501,7 @@ def shift_mask(new_cen, new_mask, old_cen, old_roi_mask, limit_qnum=None): # qm = nopr>0 for j, qv in enumerate(qu): nroi_mask[nroi_mask_ == qv] = j + 1 - if limit_qnum != None: + if limit_qnum is not None: nroi_mask[nroi_mask > limit_qnum] = 0 return nroi_mask @@ -722,13 +764,13 @@ def plot_q_rate_general( if Nqz != 1: legend = ax.legend(loc="best") - if plot_index_range != None: + if plot_index_range is not None: d1, d2 = plot_index_range d2 = min(len(x) - 1, d2) ax.set_xlim((x**power)[d1], (x**power)[d2]) ax.set_ylim(y[d1], y[d2]) - if ylim != None: + if ylim is not None: ax.set_ylim(ylim) ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$) (log)") @@ -769,11 +811,11 @@ def plot_xy_x2( kwargs: could include xlim (in unit of index), ylim (in unit of real value) """ - if fig_ax == None: + if fig_ax is None: fig, ax1 = plt.subplots() else: fig, ax1 = fig_ax - if pargs != None: + if pargs is not None: uid = pargs["uid"] path = pargs["path"] else: @@ -802,7 +844,7 @@ def plot_xy_x2( lx1, lx2 = xlim ax1.set_xlim([x[lx1], x[lx2]]) ax1.set_ylim(ylim) - if x2 != None: + if x2 is not None: ax2 = ax1.twiny() ax2.set_xlabel(xlabel2) ax2.set_ylabel(ylabel) @@ -820,7 +862,7 @@ def 
save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 tifs = list(db[uid].data("OAV_image"))[0] try: pixel_scalebar = np.ceil(scalebar_size / md["OAV resolution um_pixel"]) - except: + except Exception: pixel_scalebar = None print("No OAVS resolution is available.") @@ -834,7 +876,7 @@ def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 try: oav_period = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_period"] oav_expt = h["descriptors"][d]["configuration"]["OAV"]["data"]["OAV_cam_acquire_time"] - except: + except Exception: pass oav_times = [] for i in range(len(oavs)): @@ -846,8 +888,8 @@ def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 img = oavs[m] try: ind = np.flipud(img * scale)[:, :, 2] < threshold - except: - ind = np.flipud(img * scale) < threshold + except Exception: + np.flipud(img * scale) < threshold rgb_cont_img = np.copy(np.flipud(img)) # rgb_cont_img[ind,0]=1000 if brightness_scale != 1: @@ -858,7 +900,7 @@ def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 cross = [685, 440, 50] # definintion of direct beam: x, y, size plt.plot([cross[0] - cross[2] / 2, cross[0] + cross[2] / 2], [cross[1], cross[1]], "r-") plt.plot([cross[0], cross[0]], [cross[1] - cross[2] / 2, cross[1] + cross[2] / 2], "r-") - if pixel_scalebar != None: + if pixel_scalebar is not None: plt.plot([1100, 1100 + pixel_scalebar], [150, 150], "r-", Linewidth=5) # scale bar. plt.text(1000, 50, text_string, fontsize=14, color="r") plt.text(600, 50, str(oav_times[m])[:5] + " [s]", fontsize=14, color="r") @@ -996,7 +1038,7 @@ def lin2log_g2(lin_tau, lin_g2, num_points=False): lin_g2 = lin_g2[np.isfinite(lin_tau)] lin_tau = lin_tau[np.isfinite(lin_tau)] # print('from lin-to-log-g2_sampling: ',lin_tau) - if num_points == False: + if num_points is False: # automatically decide how many log-points (8/decade) dec = int(np.ceil((np.log10(lin_tau.max()) - np.log10(lin_tau.min())) * 8)) else: @@ -1061,7 +1103,6 @@ def delete_data(old_path, new_path="/tmp_data/data/"): new_path: the new path """ import glob - import shutil # old_path = sud[2][0] # new_path = '/tmp_data/data/' @@ -1086,11 +1127,11 @@ def show_tif_series( """ - if center != None: + if center is not None: cy, cx = center # infs = sorted(sample_list) N = len(tif_series) - if Nx == None: + if Nx is None: sy = int(np.sqrt(N)) else: sy = Nx @@ -1138,16 +1179,16 @@ def ps(y, shift=0.5, replot=True, logplot="off", x=None): """ - if x == None: + if x is None: x = np.arange(len(y)) x = np.array(x) y = np.array(y) PEAK = x[np.argmax(y)] - PEAK_y = np.max(y) + np.max(y) COM = np.sum(x * y) / np.sum(y) - ### from Maksim: assume this is a peak profile: + # from Maksim: assume this is a peak profile: def is_positive(num): return True if num > 0 else False @@ -1167,18 +1208,18 @@ def is_positive(num): ps.cen = CEN yf = ym # return { - # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), - # 'x_range': list_of_roots, + # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), + # 'x_range': list_of_roots, # } else: # ok, maybe it's a step function.. 
# print('no peak...trying step function...') ym = ym + shift - def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang + def err_func(x, x0, k=2, A=1, base=0): # erf fit from Yugang return base - A * erf(k * (x - x0)) mod = Model(err_func) - ### estimate starting values: + # estimate starting values: x0 = np.mean(x) # k=0.1*(np.max(x)-np.min(x)) pars = mod.make_params(x0=x0, k=2, A=1.0, base=0.0) @@ -1196,7 +1237,7 @@ def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang ps.fwhm = FWHM if replot: - ### re-plot results: + # re-plot results: if logplot == "on": fig, ax = plt.subplots() # plt.figure() ax.semilogy([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") @@ -1224,7 +1265,7 @@ def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) # plt.show() - ### assign values of interest as function attributes: + # assign values of interest as function attributes: ps.peak = PEAK ps.com = COM return ps.cen @@ -1322,8 +1363,8 @@ def get_q_iq_using_dynamic_mask(FD, mask, setup_pargs, bin_number=1, threshold=1 q_saxs: q in A-1 """ beg = FD.beg - end = FD.end - shape = FD.rdframe(beg).shape + FD.end + FD.rdframe(beg).shape Nimg_ = FD.end - FD.beg # Nimg_ = 100 Nimg = Nimg_ // bin_number @@ -1397,14 +1438,14 @@ def average_array_withNan(array, axis=0, mask=None): Output: avg: averaged array along axis """ - shape = array.shape - if mask == None: + array.shape + if mask is None: mask = np.isnan(array) # mask = np.ma.masked_invalid(array).mask array_ = np.ma.masked_array(array, mask=mask) try: sums = np.array(np.ma.sum(array_[:, :], axis=axis)) - except: + except Exception: sums = np.array(np.ma.sum(array_[:], axis=axis)) cts = np.sum(~mask, axis=axis) @@ -1584,7 +1625,7 @@ def ls_dir2(inDir, string=None): from os import listdir from os.path import isfile, join - if string == None: + if string is None: tifs = np.array([f for f in listdir(inDir) if isfile(join(inDir, f))]) else: tifs = np.array([f for f in listdir(inDir) if (isfile(join(inDir, f))) & (string in f)]) @@ -1602,7 +1643,7 @@ def re_filename(old_filename, new_filename, inDir=None, verbose=True): '/home/yuzhang/Analysis/Timepix/2017_3/Results/run17/run17_pos1/' ) """ - if inDir != None: + if inDir is not None: os.rename(inDir + old_filename, inDir + new_filename) else: os.rename(old_filename, new_filename) @@ -1674,7 +1715,7 @@ def get_roi_nr(qdict, q, phi, q_nr=True, phi_nr=False, q_thresh=0, p_thresh=0, s qslist, phislist, ] # -> this is the original - if silent == False: + if silent is False: print("list of available Qs:") print(qslist) print("list of available phis:") @@ -1705,10 +1746,10 @@ def get_fit_by_two_linear( convinent fit class, gmfit2(x) gives yvale """ - if xrange == None: + if xrange is None: x1, x2 = min(x), max(x) x1, x2 = xrange - if mid_xpoint2 == None: + if mid_xpoint2 is None: mid_xpoint2 = mid_xpoint1 D1, gmfit1 = linear_fit(x, y, xrange=[x1, mid_xpoint1]) D2, gmfit2 = linear_fit(x, y, xrange=[mid_xpoint2, x2]) @@ -1740,7 +1781,7 @@ def get_curve_turning_points( def plot_fit_two_linear_fit(x, y, gmfit1, gmfit2, ax=None): """YG Octo 16,2017 Plot data with two fitted linear func""" - if ax == None: + if ax is None: fig, ax = plt.subplots() plot1D(x=x, y=y, ax=ax, c="k", legend="data", m="o", ls="") # logx=True, logy=True ) plot1D(x=x, y=gmfit1(x), ax=ax, c="r", m="", ls="-", legend="fit1") @@ -1752,7 +1793,7 @@ def 
linear_fit(x, y, xrange=None): """YG Octo 16,2017 copied from XPCS_SAXS a linear fit """ - if xrange != None: + if xrange is not None: xmin, xmax = xrange x1, x2 = find_index(x, xmin, tolerance=None), find_index(x, xmax, tolerance=None) x_ = x[x1:x2] @@ -1768,11 +1809,10 @@ def linear_fit(x, y, xrange=None): def find_index(x, x0, tolerance=None): """YG Octo 16,2017 copied from SAXS find index of x0 in x - #find the position of P in a list (plist) with tolerance + # find the position of P in a list (plist) with tolerance """ - N = len(x) - i = 0 + len(x) if x0 > max(x): position = len(x) - 1 elif x0 < min(x): @@ -1785,13 +1825,13 @@ def find_index(x, x0, tolerance=None): def find_index_old(x, x0, tolerance=None): """YG Octo 16,2017 copied from SAXS find index of x0 in x - #find the position of P in a list (plist) with tolerance + # find the position of P in a list (plist) with tolerance """ - N = len(x) + len(x) i = 0 position = None - if tolerance == None: + if tolerance is None: tolerance = (x[1] - x[0]) / 2.0 if x0 > max(x): position = len(x) - 1 @@ -1896,7 +1936,7 @@ def sgolay2d(z, window_size, order, derivative=None): Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band) # solve system and convolve - if derivative == None: + if derivative is None: m = np.linalg.pinv(A)[0].reshape((window_size, -1)) return scipy.signal.fftconvolve(Z, m, mode="valid") elif derivative == "col": @@ -1944,7 +1984,7 @@ def extract_data_from_file( Or giving start_row: int good_cols: list of integer, good index of cols lables: the label of the good_cols - #save: False, if True will save the data into a csv file with filename appending csv ?? + # save: False, if True will save the data into a csv file with filename appending csv ?? Return: a pds.dataframe Example: @@ -1961,30 +2001,30 @@ def extract_data_from_file( p = fin.readlines() di = 1e20 for i, line in enumerate(p): - if start_row != None: + if start_row is not None: di = start_row - elif good_line_pattern != None: + elif good_line_pattern is not None: if good_line_pattern in line: di = i else: di = 0 if i == di + 1: els = line.split() - if good_cols == None: + if good_cols is None: data = np.array(els, dtype=float) else: data = np.array([els[j] for j in good_cols], dtype=float) elif i > di: try: els = line.split() - if good_cols == None: + if good_cols is None: temp = np.array(els, dtype=float) else: temp = np.array([els[j] for j in good_cols], dtype=float) data = np.vstack((data, temp)) - except: + except Exception: pass - if labels == None: + if labels is None: labels = np.arange(data.shape[1]) df = pds.DataFrame(data, index=np.arange(data.shape[0]), columns=labels) return df @@ -2012,9 +2052,9 @@ def get_print_uids(start_time, stop_time, return_all_info=False): date = time.ctime(hdrs[-i - 1]["start"]["time"]) try: m = hdrs[-i - 1]["start"]["Measurement"] - except: + except Exception: m = "" - info = "%3d: uid = '%s' ##%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) + info = "%3d: uid = '%s' #%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) print(info) if return_all_info: all_info[n] = info @@ -2081,7 +2121,7 @@ def create_ring_mask(shape, r1, r2, center, mask=None): m[rr, cc] = 1 rr, cc = disk((center[1], center[0]), r1, shape=shape) m[rr, cc] = 0 - if mask != None: + if mask is not None: m += mask return m @@ -2156,7 +2196,7 @@ def validate_uid(uid): imgs = load_data(uid, md["detector"], reverse=True) print(imgs) return 1 - except: + except Exception: print("Can't load this uid=%s!" 
% uid) return 0 @@ -2252,7 +2292,7 @@ def plot_g1(taus, g2, g2_fit_paras, qr=None, ylim=[0, 1], title=""): Plot one-time correlation, giving taus, g2, g2_fit""" noqs = g2.shape[1] fig, ax = plt.subplots() - if qr == None: + if qr is None: qr = np.arange(noqs) for i in range(noqs): b = g2_fit_paras["baseline"][i] @@ -2304,7 +2344,7 @@ def filter_roi_mask(filter_dict, roi_mask, avg_img, filter_type="ylim"): return rm -## +# # Dev at March 31 for create Eiger chip mask def create_chip_edges_mask(det="1M"): """Create a chip edge mask for Eiger detector""" @@ -2356,7 +2396,7 @@ def create_folder(base_folder, sub_folder): """ data_dir0 = os.path.join(base_folder, sub_folder) - ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + # Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) print("Results from this analysis will be stashed in the directory %s" % data_dir0) return data_dir0 @@ -2372,20 +2412,20 @@ def create_user_folder(CYCLE, username=None, default_dir="/XF11ID/analysis/"): Created folder name """ if username != "Default": - if username == None: + if username is None: username = getpass.getuser() data_dir0 = os.path.join(default_dir, CYCLE, username, "Results/") else: data_dir0 = os.path.join(default_dir, CYCLE + "/") - ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + # Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) print("Results from this analysis will be stashed in the directory %s" % data_dir0) return data_dir0 -################################## -#########For dose analysis ####### -################################## +# +# For dose analysis # +# def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): """ Calculate the frame number to be correlated by giving a X-ray exposure dose @@ -2445,7 +2485,7 @@ def get_series_g2_taus(fra_max_list, acq_time=1, max_fra_num=None, log_taus=True """ tausd = {} for n in fra_max_list: - if max_fra_num != None: + if max_fra_num is not None: L = max_fra_num else: L = np.infty @@ -2488,14 +2528,14 @@ def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm try: lambda_ = md["wavelength"] - except: + except Exception: lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms try: Ldet = md["det_distance"] if Ldet <= 1000: Ldet *= 1000 md["det_distance"] = Ldet - except: + except Exception: Ldet = md["detector_distance"] if Ldet <= 1000: Ldet *= 1000 @@ -2503,21 +2543,21 @@ def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * try: # try exp time from detector exposuretime = md["count_time"] # exposure time in sec - except: + except Exception: exposuretime = md["cam_acquire_time"] # exposure time in sec try: # try acq time from detector acquisition_period = md["frame_time"] - except: + except Exception: try: acquisition_period = md["acquire period"] - except: + except Exception: uid = md["uid"] acquisition_period = float(db[uid]["start"]["acquire period"]) timeperframe = acquisition_period - if inc_x0 != None: + if inc_x0 is not None: mdn["beam_center_x"] = inc_y0 print("Beam_center_x has been changed to %s. (no change in raw metadata): " % inc_y0) - if inc_y0 != None: + if inc_y0 is not None: mdn["beam_center_y"] = inc_x0 print("Beam_center_y has been changed to %s. 
(no change in raw metadata): " % inc_x0) center = [int(mdn["beam_center_x"]), int(mdn["beam_center_y"])] # beam center [y,x] for python image @@ -2589,13 +2629,13 @@ def get_qval_dict(qr_center, qz_center=None, qval_dict=None, multi_qr_for_one_qz """ - if qval_dict == None: + if qval_dict is None: qval_dict = {} maxN = 0 else: maxN = np.max(list(qval_dict.keys())) + 1 - if qz_center != None: + if qz_center is not None: if multi_qr_for_one_qz: if one_qz_multi_qr: for qzind in range(len(qz_center)): @@ -2668,7 +2708,7 @@ def check_bad_uids(uids, mask, img_choice_N=10, bad_uids_index=None): buids = [] guids = list(uids) # print( guids ) - if bad_uids_index == None: + if bad_uids_index is None: bad_uids_index = [] for i, uid in enumerate(uids): # print( i, uid ) @@ -2702,7 +2742,7 @@ def find_uids(start_time, stop_time): hdrs = db(start_time=start_time, stop_time=stop_time) try: print("Totally %s uids are found." % (len(list(hdrs)))) - except: + except Exception: pass sids = [] uids = [] @@ -2724,7 +2764,7 @@ def ployfit(y, x=None, order=20): fit data (one-d array) by a ploynominal function return the fitted one-d array """ - if x == None: + if x is None: x = range(len(y)) pol = np.polyfit(x, y, order) return np.polyval(pol, x) @@ -2749,9 +2789,9 @@ def check_bad_data_points( else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve """ - if good_start == None: + if good_start is None: good_start = 0 - if good_end == None: + if good_end is None: good_end = len(data) bd1 = [i for i in range(0, good_start)] bd3 = [i for i in range(good_end, len(data))] @@ -2809,7 +2849,7 @@ def check_bad_data_points( legend_size=legend_size, ) - if path != None: + if path is not None: fp = path + "%s" % (uid) + "_find_bad_points" + ".png" plt.savefig(fp, dpi=fig.dpi) bd2 = list(np.where(np.abs(d - d.mean()) > scale * d.std())[0] + good_start) @@ -2840,9 +2880,9 @@ def get_bad_frame_list( else: use the mean (a value) of imgsum and scale to get low and high threshold, it's good to remove bad frames/pixels on top of flatten curve """ - if good_start == None: + if good_start is None: good_start = 0 - if good_end == None: + if good_end is None: good_end = len(imgsum) bd1 = [i for i in range(0, good_start)] bd3 = [i for i in range(good_end, len(imgsum))] @@ -2900,7 +2940,7 @@ def get_bad_frame_list( legend_size=legend_size, ) - if path != None: + if path is not None: fp = path + "%s" % (uid) + "_imgsum_analysis" + ".png" plt.savefig(fp, dpi=fig.dpi) @@ -2956,12 +2996,12 @@ def print_dict(dicts, keys=None): print keys: values in a dicts if keys is None: print all the keys """ - if keys == None: + if keys is None: keys = list(dicts.keys()) for k in keys: try: print("%s--> %s" % (k, dicts[k])) - except: + except Exception: pass @@ -3003,7 +3043,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): md["suid"] = uid # short uid try: md["filename"] = get_sid_filenames(header)[2][0] - except: + except Exception: md["filename"] = "N.A." devices = sorted(list(header.devices())) @@ -3023,7 +3063,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): # detector_names = sorted( header.start['detectors'] ) detector_names = sorted(get_detectors(db[uid])) # if len(detector_names) > 1: - # raise ValueError("More than one det. This would have unintented consequences.") + # raise ValueError("More than one det. 
This would have unintented consequences.") detector_name = detector_names[0] # md['detector'] = detector_name md["detector"] = get_detector(header) @@ -3034,12 +3074,12 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): md[newkey] = val # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): - # md[ k[len(dec)+1:] ]= v + # md[ k[len(dec)+1:] ]= v try: md.update(header.start["plan_args"].items()) md.pop("plan_args") - except: + except Exception: pass md.update(header.start.items()) @@ -3050,7 +3090,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): if "primary" in header.v2: descriptor = header.v2["primary"].descriptors[0] md["img_shape"] = descriptor["data_keys"][md["detector"]]["shape"][:2][::-1] - except: + except Exception: if verbose: print("couldn't find image shape...skip!") else: @@ -3059,7 +3099,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): # for k, v in sorted(md.items()): # ... - # print(f'{k}: {v}') + # print(f'{k}: {v}') return md @@ -3103,7 +3143,7 @@ def get_max_countc(FD, labeled_array): (p, v) = FD.rdrawframe(i) w = np.where(timg[p])[0] max_inten = max(max_inten, np.max(v[w])) - except: + except Exception: pass return max_inten @@ -3121,7 +3161,7 @@ def create_polygon_mask(image, xcorners, ycorners): """ - from skimage.draw import disk, line, line_aa, polygon + from skimage.draw import polygon imy, imx = image.shape bst_mask = np.zeros_like(image, dtype=bool) @@ -3144,7 +3184,7 @@ def create_rectangle_mask(image, xcorners, ycorners): """ - from skimage.draw import disk, line, line_aa, polygon + from skimage.draw import polygon imy, imx = image.shape bst_mask = np.zeros_like(image, dtype=bool) @@ -3199,7 +3239,7 @@ def create_wedge(image, center, radius, wcors, acute_angle=True): wcors: [ [x1,x2,x3...], [y1,y2,y3..] """ - from skimage.draw import disk, line, line_aa, polygon + from skimage.draw import disk, polygon imy, imx = image.shape cy, cx = center @@ -3236,12 +3276,12 @@ def create_cross_mask( Return: the cross mask """ - from skimage.draw import disk, line, line_aa, polygon + from skimage.draw import disk, polygon imy, imx = image.shape cx, cy = center bst_mask = np.zeros_like(image, dtype=bool) - ### + # # for right part wy = wy_right x = np.array([cx, imx, imx, cx]) @@ -3249,7 +3289,7 @@ def create_cross_mask( rr, cc = polygon(y, x, shape=image.shape) bst_mask[rr, cc] = 1 - ### + # # for left part wy = wy_left x = np.array([0, cx, cx, 0]) @@ -3257,7 +3297,7 @@ def create_cross_mask( rr, cc = polygon(y, x, shape=image.shape) bst_mask[rr, cc] = 1 - ### + # # for up part wx = wx_up x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) @@ -3265,7 +3305,7 @@ def create_cross_mask( rr, cc = polygon(y, x, shape=image.shape) bst_mask[rr, cc] = 1 - ### + # # for low part wx = wx_down x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) @@ -3325,14 +3365,14 @@ def export_scan_scalar( return datap -##### +# # load data by databroker def get_flatfield(uid, reverse=False): import h5py - detector = get_detector(db[uid]) + get_detector(db[uid]) sud = get_sid_filenames(db[uid]) master_path = "%s_master.h5" % (sud[2][0]) print(master_path) @@ -3438,48 +3478,48 @@ def get_sid_filenames(hdr, verbose=False): # def get_sid_filenames(header): -# """YG. Dev Jan, 2016 -# Get a bluesky scan_id, unique_id, filename by giveing uid - -# Parameters -# ---------- -# header: a header of a bluesky scan, e.g. 
db[-1] - -# Returns -# ------- -# scan_id: integer -# unique_id: string, a full string of a uid -# filename: sring - -# Usuage: -# sid,uid, filenames = get_sid_filenames(db[uid]) - -# """ -# from collections import defaultdict -# from glob import glob -# from pathlib import Path - -# filepaths = [] -# resources = {} # uid: document -# datums = defaultdict(list) # uid: List(document) -# for name, doc in header.documents(): -# if name == "resource": -# resources[doc["uid"]] = doc -# elif name == "datum": -# datums[doc["resource"]].append(doc) -# elif name == "datum_page": -# for datum in event_model.unpack_datum_page(doc): -# datums[datum["resource"]].append(datum) -# for resource_uid, resource in resources.items(): -# file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) -# if 'eiger' not in resource['spec'].lower(): -# continue -# for datum in datums[resource_uid]: -# dm_kw = datum["datum_kwargs"] -# seq_id = dm_kw['seq_id'] -# new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') -# filepaths.extend(new_filepaths) -# return header.start['scan_id'], header.start['uid'], filepaths +# """YG. Dev Jan, 2016 +# Get a bluesky scan_id, unique_id, filename by giveing uid + +# Parameters +# ---------- +# header: a header of a bluesky scan, e.g. db[-1] + +# Returns +# ------- +# scan_id: integer +# unique_id: string, a full string of a uid +# filename: sring + +# Usuage: +# sid,uid, filenames = get_sid_filenames(db[uid]) + +# """ +# from collections import defaultdict +# from glob import glob +# from pathlib import Path + +# filepaths = [] +# resources = {} # uid: document +# datums = defaultdict(list) # uid: List(document) +# for name, doc in header.documents(): +# if name == "resource": +# resources[doc["uid"]] = doc +# elif name == "datum": +# datums[doc["resource"]].append(doc) +# elif name == "datum_page": +# for datum in event_model.unpack_datum_page(doc): +# datums[datum["resource"]].append(datum) +# for resource_uid, resource in resources.items(): +# file_prefix = Path(resource.get('root', '/'), resource["resource_path"]) +# if 'eiger' not in resource['spec'].lower(): +# continue +# for datum in datums[resource_uid]: +# dm_kw = datum["datum_kwargs"] +# seq_id = dm_kw['seq_id'] +# new_filepaths = glob(f'{file_prefix!s}_{seq_id}*') +# filepaths.extend(new_filepaths) +# return header.start['scan_id'], header.start['uid'], filepaths def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): @@ -3523,7 +3563,6 @@ def load_dask_data(uid, detector, mask_path_full, reverse=False, rot90=False): mask_dict = json.load(json_open) img_md["pixel_mask"] = np.array(mask_dict["pixel_mask"]) img_md["binary_mask"] = np.array(mask_dict["binary_mask"]) - del mask_dict # load image data as dask-arry: dimg = hdr.xarray_dask()[md["detector"]][0] @@ -3641,7 +3680,7 @@ def load_data2(uid, detector="eiger4m_single_image"): try: (ev,) = hdr.events(fields=[detector]) flag = 0 - except: + except Exception: flag += 1 print("Trying again ...!") @@ -3725,7 +3764,7 @@ def create_hot_pixel_mask(img, threshold, center=None, center_radius=300, outer_ """ bst_mask = np.ones_like(img, dtype=bool) - if center != None: + if center is not None: from skimage.draw import disk imy, imx = img.shape @@ -3786,8 +3825,8 @@ def RemoveHot(img, threshold=1e7, plot_=True): return mask -############ -###plot data +# +# plot data def show_img( @@ -3839,7 +3878,7 @@ def show_img( ------- None """ - if ax == None: + if ax is None: if RUN_GUI: fig = Figure() ax = fig.add_subplot(111) @@ -3848,7 +3887,7 @@ def 
show_img( else: fig, ax = ax - if center != None: + if center is not None: plot1D(center[1], center[0], ax=ax, c="b", m="o", legend="") if not logs: if not use_mat_imshow: @@ -3886,13 +3925,13 @@ def show_img( norm=LogNorm(vmin, vmax), extent=extent, ) - if label_array != None: + if label_array is not None: im2 = show_label_array(ax, label_array, alpha=alpha, cmap=cmap, interpolation=interpolation) ax.set_title(image_name) - if xlim != None: + if xlim is not None: ax.set_xlim(xlim) - if ylim != None: + if ylim is not None: ax.set_ylim(ylim) if not show_ticks: @@ -3906,13 +3945,13 @@ def show_img( # mpl.rcParams['ytick.labelsize'] = tick_size # print(tick_size) - if ylabel != None: + if ylabel is not None: # ax.set_ylabel(ylabel)#, fontsize = 9) ax.set_ylabel(ylabel, fontsize=lab_fontsize) - if xlabel != None: + if xlabel is not None: ax.set_xlabel(xlabel, fontsize=lab_fontsize) - if aspect != None: + if aspect is not None: # aspect = image.shape[1]/float( image.shape[0] ) ax.set_aspect(aspect) else: @@ -3929,7 +3968,7 @@ def show_img( fp = path + "%s" % (file_name) + CurTime + "." + save_format else: fp = path + "%s" % (image_name) + "." + save_format - if dpi == None: + if dpi is None: dpi = fig.dpi plt.savefig(fp, dpi=dpi) # fig.set_tight_layout(tight) @@ -3965,53 +4004,53 @@ def plot1D( ------- None """ - if ax == None: + if ax is None: if RUN_GUI: fig = Figure() ax = fig.add_subplot(111) else: - if figsize != None: + if figsize is not None: fig, ax = plt.subplots(figsize=figsize) else: fig, ax = plt.subplots() - if legend == None: + if legend is None: legend = " " try: logx = kwargs["logx"] - except: + except Exception: logx = False try: logy = kwargs["logy"] - except: + except Exception: logy = False try: logxy = kwargs["logxy"] - except: + except Exception: logxy = False - if logx == True and logy == True: + if logx and logy: logxy = True try: marker = kwargs["marker"] - except: + except Exception: try: marker = kwargs["m"] - except: + except Exception: marker = next(markers_) try: color = kwargs["color"] - except: + except Exception: try: color = kwargs["c"] - except: + except Exception: color = next(colors_) - if x == None: + if x is None: x = range(len(y)) - if yerr == None: + if yerr is None: ax.plot( x, y, @@ -4060,7 +4099,7 @@ def plot1D( title = "plot" ax.set_title(title) # ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') - if (legend != "") and (legend != None): + if (legend != "") and (legend is not None): ax.legend(loc="best", fontsize=legend_size) if "save" in kwargs.keys(): if kwargs["save"]: @@ -4073,7 +4112,7 @@ def plot1D( return fig -### +# def check_shutter_open(data_series, min_inten=0, time_edge=[0, 10], plot_=False, *argv, **kwargs): @@ -4150,7 +4189,7 @@ def get_each_frame_intensity( def create_time_slice(N, slice_num, slice_width, edges=None): """create a ROI time regions""" - if edges != None: + if edges is not None: time_edge = edges else: if slice_num == 1: @@ -4203,14 +4242,14 @@ def show_label_array(ax, label_array, cmap=None, aspect=None, interpolation="nea img : AxesImage The artist added to the axes """ - if cmap == None: + if cmap is None: cmap = "viridis" # print(cmap) _cmap = copy.copy((mcm.get_cmap(cmap))) _cmap.set_under("w", 0) vmin = max(0.5, kwargs.pop("vmin", 0.5)) im = ax.imshow(label_array, cmap=cmap, interpolation=interpolation, vmin=vmin, **kwargs) - if aspect == None: + if aspect is None: ax.set_aspect(aspect="auto") # ax.set_aspect('equal') return im @@ -4308,7 +4347,7 @@ def show_ROI_on_image( if RUN_GUI: fig = Figure(figsize=(8, 8)) axes = 
fig.add_subplot(111) - elif fig_ax != None: + elif fig_ax is not None: fig, axes = fig_ax else: fig, axes = plt.subplots() # plt.subplots(figsize=(8,8)) @@ -4337,7 +4376,7 @@ def show_ROI_on_image( origin="lower", ) else: - edg = get_image_edge(ROI) + get_image_edge(ROI) image_ = get_image_with_roi(image, ROI, scale_factor=2) # fig, axes = plt.subplots( ) show_img( @@ -4350,8 +4389,8 @@ def show_ROI_on_image( cmap=cmap, ) - if rect_reqion == None: - if center != None: + if rect_reqion is None: + if center is not None: x1, x2 = [center[1] - rwidth, center[1] + rwidth] y1, y2 = [center[0] - rwidth, center[0] + rwidth] axes.set_xlim([x1, x2]) @@ -4392,7 +4431,7 @@ def show_ROI_on_image( def crop_image(image, crop_mask): """Crop the non_zeros pixels of an image to a new image""" - from skimage.util import crop, pad + from skimage.util import crop pxlst = np.where(crop_mask.ravel())[0] dims = crop_mask.shape @@ -4414,7 +4453,7 @@ def crop_image(image, crop_mask): def get_avg_img(data_series, img_samp_index=None, sampling=100, plot_=False, save=False, *argv, **kwargs): """Get average imagef from a data_series by every sampling number to save time""" - if img_samp_index == None: + if img_samp_index is None: avg_img = np.average(data_series[::sampling], axis=0) else: avg_img = np.zeros_like(data_series[0]) @@ -4508,7 +4547,7 @@ def cal_g2(image_series, ring_mask, bad_image_process, bad_frame_list=None, good bad_img_list = np.array(bad_frame_list) - good_start new_imgs = mask_image.bad_to_nan_gen(image_series, bad_img_list) - if num_lev == None: + if num_lev is None: num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) print("%s frames will be processed..." % (noframes)) @@ -4519,7 +4558,7 @@ def cal_g2(image_series, ring_mask, bad_image_process, bad_frame_list=None, good else: - if num_lev == None: + if num_lev is None: num_lev = int(np.log(noframes / (num_buf - 1)) / np.log(2) + 1) + 1 print("In this g2 calculation, the buf and lev number are: %s--%s--" % (num_buf, num_lev)) print("%s frames will be processed..." % (noframes)) @@ -4564,7 +4603,6 @@ def trans_data_to_pd(data, label=None, dtype="array"): a pandas.DataFrame """ # lists a [ list1, list2...] all the list have the same length - import sys import pandas as pd from numpy import arange, array @@ -4579,7 +4617,7 @@ def trans_data_to_pd(data, label=None, dtype="array"): print("Wrong data type! 
Now only support 'list' and 'array' tpye") index = arange(N) - if label == None: + if label is None: label = ["data%s" % i for i in range(M)] # print label df = pd.DataFrame(data, index=index, columns=label) @@ -4610,7 +4648,7 @@ def save_lists(data, label=None, filename=None, path=None, return_res=False, ver df = trans_data_to_pd(d.T, label, "array") # dt =datetime.now() # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - if filename == None: + if filename is None: filename = "data" filename = os.path.join(path, filename) # +'.csv') df.to_csv(filename) @@ -4669,7 +4707,7 @@ def save_arrays(data, label=None, dtype="array", filename=None, path=None, retur df = trans_data_to_pd(data, label, dtype) # dt =datetime.now() # CurTime = '%s%02d%02d-%02d%02d-' % (dt.year, dt.month, dt.day,dt.hour,dt.minute) - if filename == None: + if filename is None: filename = "data" filename_ = os.path.join(path, filename) # +'.csv') df.to_csv(filename_) @@ -4822,12 +4860,12 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None): try: iter(width) width_is_list = True - except: + except Exception: width_is_list = False try: iter(spacing) spacing_is_list = True - except: + except Exception: spacing_is_list = False # width_is_list = isinstance(width, collections.Iterable) @@ -4835,7 +4873,7 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None): if width_is_list and spacing_is_list: if len(width) != len(spacing) + 1: raise ValueError("List of spacings must be one less than list " "of widths.") - if num_rings == None: + if num_rings is None: try: num_rings = len(width) except TypeError: @@ -4858,7 +4896,7 @@ def ring_edges(inner_radius, width, spacing=0, num_rings=None): if not width_is_list: width = np.ones(num_rings) * width - if spacing == None: + if spacing is None: spacing = [] else: if not spacing_is_list: @@ -4907,13 +4945,13 @@ def get_non_uniform_edges( inner and outer radius for each ring """ - if number_rings == None: + if number_rings is None: number_rings = 1 edges = np.zeros([len(centers) * number_rings, 2]) try: iter(width) - except: + except Exception: width = np.ones_like(centers) * width for i, c in enumerate(centers): edges[i * number_rings : (i + 1) * number_rings, :] = ring_edges( @@ -4929,7 +4967,6 @@ def trans_tf_to_td(tf, dtype="dframe"): import datetime import numpy as np - import pandas as pd """translate time.float to time.date, td.type dframe: a dataframe @@ -5084,7 +5121,7 @@ def save_g2_general(g2, taus, qr=None, qz=None, uid="uid", path=None, return_res return df -########### +# # *for g2 fit and plot @@ -5122,7 +5159,7 @@ def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1): def flow_para_function_explicitq(x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0): """Nov 9, 2017 Basically, make q vector to (qr, angle), - ###relaxation_rate is actually a diffusion rate + # relaxation_rate is actually a diffusion rate flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) Diffusion part: np.exp( -2*D q^2 *tau ) q_ang: would be np.radians( ang - 90 ) @@ -5392,7 +5429,7 @@ def get_g2_fit_general( fit_res = [] model_data = [] for i in range(num_rings): - if fit_range != None: + if fit_range is not None: y_ = g2[1:, i][fit_range[0] : fit_range[1]] lags_ = taus[1:][fit_range[0] : fit_range[1]] else: @@ -5411,7 +5448,7 @@ def get_g2_fit_general( try: if isinstance(_guess_val[k], (np.ndarray, list)): pars[k].value = _guess_val[k][i] - except: + except Exception: pass if True: @@ -5431,7 +5468,7 @@ 
def get_g2_fit_general( # print(k, _guess_val[k] ) # pars[k].value = _guess_val[k][i] if function == "flow_para_function_explicitq" or function == "flow_para_qang": - if qval_dict == None: + if qval_dict is None: print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") else: @@ -5451,7 +5488,7 @@ def get_g2_fit_general( pars["%s" % v].vary = False # if i==20: - # print(pars) + # print(pars) # print( pars ) result1 = mod.fit(y, pars, x=lags) # print(qval_dict[i][0], qval_dict[i][1], y) @@ -5553,9 +5590,9 @@ def get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): ) -############################################ -##a good func to plot g2 for all types of geogmetries -############################################ +# +# a good function to plot g2 for all types of geometries +# def plot_g2_general( @@ -5636,7 +5673,7 @@ def plot_g2_general( if geometry == "saxs": if qphi_analysis: geometry = "ang_saxs" - if qth_interest != None: + if qth_interest is not None: if not isinstance(qth_interest, list): print("Please give a list for qth_interest") else: @@ -5647,10 +5684,10 @@ for k in list(g2_dict.keys()): g2_dict_[k] = g2_dict[k][:, [i for i in qth_interest]] # for k in list(taus_dict.keys()): - # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] + # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] taus_dict_ = taus_dict qval_dict_ = {k: qval_dict[k] for k in qth_interest} - if fit_res != None: + if fit_res is not None: fit_res_ = [fit_res[k] for k in qth_interest] else: fit_res_ = None @@ -5680,8 +5717,8 @@ def plot_g2_general( ind_long_i = ind_long[s_ind] num_long_i = len(ind_long_i) # if show_average_ang_saxs: - # if geometry=='ang_saxs': - # num_long_i += 1 + # if geometry=='ang_saxs': + # num_long_i += 1 if RUN_GUI: fig = Figure(figsize=(10, 12)) else: @@ -5752,8 +5789,8 @@ def plot_g2_general( for i, l_ind in enumerate(ind_long_i): if num_long_i <= max_plotnum_fig: # if s_ind ==2: - # print('Here') - # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) + # print('Here') + # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) ax = fig.add_subplot(sx, sy, i + 1) if sx == 1: if sy == 1: @@ -5788,20 +5825,20 @@ def plot_g2_general( ax.set_title(title_long + " (%s )" % (1 + l_ind), y=1.05, fontsize=fontsize_sublabel) # print( geometry ) # print( title_long ) - if qth_interest != None: # it might have a bug here, todolist!!! + if qth_interest is not None:  # it might have a bug here, todolist!!!
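# The `== None` -> `is None` conversions throughout these hunks are flake8
# E711 fixes. A minimal standalone sketch (not part of the patch) of why the
# identity test matters in numpy-heavy code like this: `==` broadcasts
# elementwise on arrays, so the old pattern can raise instead of testing
# whether an argument was supplied. The `qth` name here is illustrative only.
import numpy as np

qth = np.arange(4)         # e.g. a q-index argument passed as an array
print(qth is None)         # False: identity test, always unambiguous
print(qth == None)         # array([False, False, False, False]) -- elementwise
try:
    if qth == None:        # the old pattern
        pass
except ValueError as err:
    print("ambiguous truth value:", err)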
lab = sorted(list(qval_dict_.keys())) # print( lab, l_ind) ax.set_title(title_long + " (%s )" % (lab[l_ind] + 1), y=1.05, fontsize=12) for ki, k in enumerate(list(g2_dict_.keys())): if ki == 0: c = "b" - if fit_res == None: + if fit_res is None: m = "-o" else: m = "o" elif ki == 1: c = "r" - if fit_res == None: + if fit_res is None: m = "s" else: m = "-" @@ -5812,10 +5849,10 @@ def plot_g2_general( c = colors[ki + 2] m = "-%s" % markers[ki + 2] try: - dumy = g2_dict_[k].shape + g2_dict_[k].shape # print( 'here is the shape' ) islist = False - except: + except Exception: islist_n = len(g2_dict_[k]) islist = True # print( 'here is the list' ) @@ -5827,8 +5864,8 @@ def plot_g2_general( x = taus_dict_[k][nlst] if ki == 0: ymin, ymax = min(y), max(y[1:]) - if g2_err_dict == None: - if g2_labels == None: + if g2_err_dict is None: + if g2_labels is None: ax.semilogx(x, y, m, color=c, markersize=6) else: # print('here ki ={} nlst = {}'.format( ki, nlst )) @@ -5838,7 +5875,7 @@ def plot_g2_general( ax.semilogx(x, y, m, color=c, markersize=6) else: yerr = g2_err_dict[k][nlst][:, l_ind] - if g2_labels == None: + if g2_labels is None: ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) else: if nlst == 0: @@ -5855,8 +5892,8 @@ def plot_g2_general( x = taus_dict_[k] if ki == 0: ymin, ymax = min(y), max(y[1:]) - if g2_err_dict == None: - if g2_labels == None: + if g2_err_dict is None: + if g2_labels is None: ax.semilogx(x, y, m, color=c, markersize=6) else: ax.semilogx(x, y, m, color=c, markersize=6, label=g2_labels[ki]) @@ -5864,7 +5901,7 @@ def plot_g2_general( yerr = g2_err_dict[k][:, l_ind] # print(x.shape, y.shape, yerr.shape) # print(yerr) - if g2_labels == None: + if g2_labels is None: ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6) else: ax.errorbar(x, y, yerr=yerr, fmt=m, color=c, markersize=6, label=g2_labels[ki]) @@ -5872,7 +5909,7 @@ def plot_g2_general( if l_ind == 0: ax.legend(loc="best", fontsize=8, fancybox=True, framealpha=0.5) - if fit_res_ != None: + if fit_res_ is not None: result1 = fit_res_[l_ind] # print (result1.best_values) @@ -5900,7 +5937,7 @@ def plot_g2_general( # print(qrr) rate = diff * qrr**2 flow = result1.best_values["flow_velocity"] - if qval_dict_ == None: + if qval_dict_ is None: print("Please provide qval_dict, a dict with qr and ang (in unit of degrees).") else: pass @@ -5954,7 +5991,7 @@ def plot_g2_general( vmin, vmax = kwargs["vlim"] try: ax.set_ylim([ymin * vmin, ymax * vmax]) - except: + except Exception: pass else: pass @@ -5975,7 +6012,7 @@ def plot_g2_general( # print(fig) try: plt.savefig(fp + ".png", dpi=fig.dpi) - except: + except Exception: print("Can not save figure here.") else: @@ -6059,7 +6096,6 @@ def get_q_rate_fit_general(qval_dict, rate, geometry="saxs", weights=None, *argv mastp, ) = get_short_long_labels_from_qval_dict(qval_dict, geometry=geometry) - Nqr = num_long Nqz = num_short D0 = np.zeros(Nqz) power = 2 # np.zeros( Nqz ) @@ -6070,7 +6106,7 @@ def get_q_rate_fit_general(qval_dict, rate, geometry="saxs", weights=None, *argv y = np.array(rate)[ind_long_i] x = long_label[ind_long_i] # print(y,x) - if fit_range != None: + if fit_range is not None: y = y[fit_range[0] : fit_range[1]] x = x[fit_range[0] : fit_range[1]] # print (i, y,x) @@ -6174,12 +6210,12 @@ def plot_q_rate_fit_general( if Nqz != 1: legend = ax.legend(loc="best") - if plot_index_range != None: + if plot_index_range is not None: d1, d2 = plot_index_range d2 = min(len(x) - 1, d2) ax.set_xlim((x**power)[d1], (x**power)[d2]) ax.set_ylim(y[d1], y[d2]) - if ylim != 
None: + if ylim is not None: ax.set_ylim(ylim) ax.set_ylabel("Relaxation rate " r"$\gamma$" "($s^{-1}$)") @@ -6274,7 +6310,7 @@ def outlier_mask( upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) if verbose: print("upper outlier threshold: %s" % upper_outlier_threshold) - except: + except Exception: upper_outlier_threshold = False if verbose: print("no upper outlier threshold found") @@ -6282,7 +6318,7 @@ def outlier_mask( ind2 = (out_l * pixel[0][0]) < ave_roi_int try: lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) - except: + except Exception: lower_outlier_threshold = False if verbose: print("no lower outlier threshold found") @@ -6290,7 +6326,7 @@ def outlier_mask( if verbose: print("ROI #%s: no outliers detected" % rn) - ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + # MAKE SURE we don't REMOVE more than x percent of the pixels in the roi outlier_fraction = np.sum(out_l) / len(pixel[0][0]) if verbose: print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) diff --git a/pyCHX/chx_handlers.py b/pyCHX/chx_handlers.py index 998ce9c..1ff6cc9 100644 --- a/pyCHX/chx_handlers.py +++ b/pyCHX/chx_handlers.py @@ -1,11 +1,10 @@ -###Copied from chxtools/chxtools/handlers.py -###https://github.com/NSLS-II-CHX/chxtools/blob/master/chxtools/handlers.py +# Copied from chxtools/chxtools/handlers.py +# https://github.com/NSLS-II-CHX/chxtools/blob/master/chxtools/handlers.py # handler registration and database instantiation should be done # here and only here! from databroker import Broker -from databroker.assets.handlers_base import HandlerBase from eiger_io.fs_handler import EigerHandler as EigerHandlerPIMS from eiger_io.fs_handler import EigerImages as EigerImagesPIMS diff --git a/pyCHX/chx_libs.py b/pyCHX/chx_libs.py index 4440215..17969f8 100644 --- a/pyCHX/chx_libs.py +++ b/pyCHX/chx_libs.py @@ -4,58 +4,24 @@ This module is for the necessary packages for the XPCS analysis """ -## Import all the required packages for Data Analysis +# Import all the required packages for Data Analysis from databroker import Broker -from databroker.assets.path_only_handlers import RawHandler # edit handlers here to switch to PIMS or dask # this does the databroker import # from chxtools.handlers import EigerHandler -from eiger_io.fs_handler import EigerHandler -from IPython.core.magics.display import Javascript -from modest_image import imshow -from skbeam.core.utils import multi_tau_lags -from skimage.draw import disk, ellipse, line, line_aa, polygon db = Broker.named("chx") -import collections -import copy -import getpass import itertools -import os -import pickle -import random -import sys -import time -import warnings -from datetime import datetime -import h5py import matplotlib as mpl -import matplotlib.cm as mcm import matplotlib.pyplot as plt import numpy as np -import pims -import skbeam.core.correlation as corr -import skbeam.core.roi as roi -import skbeam.core.utils as utils # * scikit-beam - data analysis tools for X-ray science -# - https://github.com/scikit-beam/scikit-beam +# - https://github.com/scikit-beam/scikit-beam # * xray-vision - plotting helper functions for X-ray science -# - https://github.com/Nikea/xray-vision -import xray_vision -import xray_vision.mpl_plotting as mpl_plot -from lmfit import Model, Parameter, Parameters, minimize, report_fit -from matplotlib import gridspec -from matplotlib.colors import LogNorm -from matplotlib.figure import Figure -from 
mpl_toolkits.axes_grid1 import make_axes_locatable -from pandas import DataFrame -from PIL import Image -from tqdm import tqdm -from xray_vision.mask.manual_mask import ManualMask -from xray_vision.mpl_plotting import speckle +# - https://github.com/Nikea/xray-vision mcolors = itertools.cycle(["b", "g", "r", "c", "m", "y", "k", "darkgoldenrod", "oldlace", "brown", "dodgerblue"]) markers = itertools.cycle(list(plt.Line2D.filled_markers)) @@ -357,7 +323,7 @@ # colors_ = itertools.cycle(sorted_colors_ ) markers_ = itertools.cycle(markers) # Custom colormaps -################################################################################ +# # ROYGBVR but with Cyan-Blue instead of Blue color_list_cyclic_spectrum = [ [1.0, 0.0, 0.0], diff --git a/pyCHX/chx_outlier_detection.py b/pyCHX/chx_outlier_detection.py index 596393e..384a43c 100644 --- a/pyCHX/chx_outlier_detection.py +++ b/pyCHX/chx_outlier_detection.py @@ -1,3 +1,7 @@ +import matplotlib.pyplot as plt +import numpy as np + + def is_outlier(points, thresh=3.5, verbose=False): """MAD test""" points.tolist() @@ -50,7 +54,7 @@ def outlier_mask( upper_outlier_threshold = np.nanmin((out_l * pixel[0][0])[out_l * pixel[0][0] > ave_roi_int]) if verbose: print("upper outlier threshold: %s" % upper_outlier_threshold) - except: + except Exception: upper_outlier_threshold = False if verbose: print("no upper outlier threshold found") @@ -58,7 +62,7 @@ def outlier_mask( ind2 = (out_l * pixel[0][0]) < ave_roi_int try: lower_outlier_threshold = np.nanmax((out_l * pixel[0][0])[ind1 * ind2]) - except: + except Exception: lower_outlier_threshold = False if verbose: print("no lower outlier threshold found") @@ -66,7 +70,7 @@ def outlier_mask( if verbose: print("ROI #%s: no outliers detected" % rn) - ### MAKE SURE we don't REMOVE more than x percent of the pixels in the roi + # MAKE SURE we don't REMOVE more than x percent of the pixels in the roi outlier_fraction = np.sum(out_l) / len(pixel[0][0]) if verbose: print("fraction of pixel values detected as outliers: %s" % np.round(outlier_fraction, 2)) diff --git a/pyCHX/chx_packages.py b/pyCHX/chx_packages.py index c3087c8..3937c71 100644 --- a/pyCHX/chx_packages.py +++ b/pyCHX/chx_packages.py @@ -1,250 +1,4 @@ -import pickle as cpk - -import historydict -from eiger_io.fs_handler import EigerImages -from skimage.draw import line, line_aa, polygon - -from pyCHX.chx_handlers import use_dask, use_pims -from pyCHX.chx_libs import ( - EigerHandler, - Javascript, - LogNorm, - Model, - cmap_albula, - cmap_vge, - datetime, - db, - getpass, - h5py, - multi_tau_lags, - np, - os, - pims, - plt, - random, - roi, - time, - tqdm, - utils, - warnings, -) +from pyCHX.chx_handlers import use_pims +from pyCHX.chx_libs import db use_pims(db) # use pims for importing eiger data, register_handler 'AD_EIGER2' and 'AD_EIGER' - -from pyCHX.chx_compress import ( - MultifileBNLCustom, - combine_binary_files, - create_compress_header, - para_compress_eigerdata, - para_segment_compress_eigerdata, - segment_compress_eigerdata, -) -from pyCHX.chx_compress_analysis import ( - Multifile, - cal_each_ring_mean_intensityc, - cal_waterfallc, - compress_eigerdata, - get_avg_imgc, - get_each_frame_intensityc, - get_each_ring_mean_intensityc, - get_time_edge_avg_img, - mean_intensityc, - plot_each_ring_mean_intensityc, - plot_waterfallc, - read_compressed_eigerdata, -) -from pyCHX.chx_correlationc import Get_Pixel_Arrayc, auto_two_Arrayc, cal_g2c, get_pixelist_interp_iq -from pyCHX.chx_correlationp import _one_time_process_errorp, auto_two_Arrayp, 
cal_g2p, cal_GPF, get_g2_from_ROI_GPF -from pyCHX.chx_crosscor import CrossCorrelator2, run_para_ccorr_sym -from pyCHX.chx_generic_functions import ( - R_2, - apply_mask, - average_array_withNan, - check_bad_uids, - check_lost_metadata, - check_ROI_intensity, - check_shutter_open, - combine_images, - copy_data, - create_cross_mask, - create_fullImg_with_box, - create_hot_pixel_mask, - create_polygon_mask, - create_rectangle_mask, - create_ring_mask, - create_seg_ring, - create_time_slice, - create_user_folder, - delete_data, - extract_data_from_file, - filter_roi_mask, - find_bad_pixels, - find_bad_pixels_FD, - find_good_xpcs_uids, - find_index, - find_uids, - fit_one_peak_curve, - get_averaged_data_from_multi_res, - get_avg_img, - get_bad_frame_list, - get_base_all_filenames, - get_cross_point, - get_current_pipeline_filename, - get_current_pipeline_fullpath, - get_curve_turning_points, - get_detector, - get_detectors, - get_each_frame_intensity, - get_echos, - get_eigerImage_per_file, - get_fit_by_two_linear, - get_fra_num_by_dose, - get_g2_fit_general, - get_image_edge, - get_image_with_roi, - get_img_from_iq, - get_last_uids, - get_mass_center_one_roi, - get_max_countc, - get_meta_data, - get_multi_tau_lag_steps, - get_non_uniform_edges, - get_print_uids, - get_q_rate_fit_general, - get_qval_dict, - get_qval_qwid_dict, - get_roi_mask_qval_qwid_by_shift, - get_roi_nr, - get_series_g2_taus, - get_SG_norm, - get_sid_filenames, - get_today_date, - get_touched_qwidth, - get_waxs_beam_center, - lin2log_g2, - linear_fit, - load_data, - load_mask, - load_pilatus, - ls_dir, - mask_badpixels, - mask_exclude_badpixel, - move_beamstop, - pad_length, - pload_obj, - plot1D, - plot_fit_two_linear_fit, - plot_g2_general, - plot_q_g2fitpara_general, - plot_q_rate_fit_general, - plot_q_rate_general, - plot_xy_with_fit, - plot_xy_x2, - print_dict, - psave_obj, - read_dict_csv, - refine_roi_mask, - reverse_updown, - ring_edges, - run_time, - save_array_to_tiff, - save_arrays, - save_current_pipeline, - save_dict_csv, - save_g2_fit_para_tocsv, - save_g2_general, - save_lists, - save_oavs_tifs, - sgolay2d, - shift_mask, - show_img, - show_ROI_on_image, - shrink_image, - trans_data_to_pd, - update_qval_dict, - update_roi_mask, - validate_uid, -) -from pyCHX.chx_olog import Attachment, LogEntry, update_olog_id, update_olog_uid, update_olog_uid_with_file -from pyCHX.chx_specklecp import ( - get_binned_his_std, - get_contrast, - get_his_std_from_pds, - get_xsvs_fit, - plot_g2_contrast, - plot_xsvs_fit, - save_bin_his_std, - save_KM, - xsvsc, - xsvsp, -) -from pyCHX.Create_Report import ( - create_multi_pdf_reports_for_uids, - create_one_pdf_reports_for_uids, - create_pdf_report, - export_xpcs_results_to_h5, - extract_xpcs_results_from_h5, - make_pdf_report, -) -from pyCHX.DataGonio import qphiavg -from pyCHX.SAXS import ( - fit_form_factor, - fit_form_factor2, - form_factor_residuals_bg_lmfit, - form_factor_residuals_lmfit, - get_form_factor_fit_lmfit, - poly_sphere_form_factor_intensity, - show_saxs_qmap, -) -from pyCHX.Two_Time_Correlation_Function import ( - get_aged_g2_from_g12, - get_aged_g2_from_g12q, - get_four_time_from_two_time, - get_one_time_from_two_time, - rotate_g12q_to_rectangle, - show_C12, -) -from pyCHX.XPCS_GiSAXS import ( - cal_1d_qr, - convert_gisaxs_pixel_to_q, - fit_qr_qz_rate, - get_1d_qr, - get_each_box_mean_intensity, - get_gisaxs_roi, - get_qedge, - get_qmap_label, - get_qr_tick_label, - get_qzr_map, - get_qzrmap, - get_reflected_angles, - get_t_qrc, - multi_uids_gisaxs_xpcs_analysis, 
- plot_gisaxs_g4, - plot_gisaxs_two_g2, - plot_qr_1d_with_ROI, - plot_qrt_pds, - plot_qzr_map, - plot_t_qrc, - show_qzr_map, - show_qzr_roi, -) -from pyCHX.XPCS_SAXS import ( - cal_g2, - combine_two_roi_mask, - create_hot_pixel_mask, - get_angular_mask, - get_circular_average, - get_cirucular_average_std, - get_each_ring_mean_intensity, - get_QrQw_From_RoiMask, - get_ring_mask, - get_seg_from_ring_mask, - get_t_iq, - get_t_iqc, - multi_uids_saxs_xpcs_analysis, - plot_circular_average, - plot_qIq_with_ROI, - plot_t_iqc, - recover_img_from_iq, - save_lists, -) diff --git a/pyCHX/chx_speckle.py b/pyCHX/chx_speckle.py index a6eb8f3..8002a56 100644 --- a/pyCHX/chx_speckle.py +++ b/pyCHX/chx_speckle.py @@ -10,7 +10,6 @@ import logging import time -import six from skbeam.core import roi from skbeam.core.utils import bin_edges_to_centers, geometric_series @@ -19,13 +18,10 @@ import sys from datetime import datetime -import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np -import scipy as sp import scipy.stats as st -from matplotlib.colors import LogNorm -from scipy.optimize import leastsq, minimize +from scipy.optimize import leastsq def xsvs( @@ -151,7 +147,7 @@ def xsvs( try: noframes = len(images) - except: + except Exception: noframes = images.length # Num= { key: [0]* len( dict_dly[key] ) for key in list(dict_dly.keys()) } @@ -403,11 +399,10 @@ def get_bin_edges(num_times, num_rois, mean_roi, max_cts): return bin_edges, bin_centers, norm_bin_edges, norm_bin_centers -################# -##for fit -################### +# +# for fit +# -from scipy import stats from scipy.special import gamma, gammaln @@ -495,7 +490,7 @@ def nbinom_dist(bin_values, K, M): return nbinom -#########poisson +# poisson def poisson(x, K): """Poisson distribution function. 
K is average photon counts @@ -634,7 +629,6 @@ def fit_xsvs1( """ from lmfit import Model - from scipy.interpolate import UnivariateSpline if func == "bn": mod = Model(nbinom_dist) @@ -707,7 +701,7 @@ def fit_xsvs1( axes.set_xlabel("K/") axes.set_ylabel("P(K)") - # Using the best K and M values interpolate and get more values for fitting curve + # Using the best K and M values interpolate and get more values for fitting curve fitx_ = np.linspace(0, max(Knorm_bin_edges[j, i][:-1]), 1000) fitx = np.linspace(0, max(bin_edges[j, i][:-1]), 1000) if func == "bn": @@ -846,7 +840,7 @@ def plot_xsvs_g2(g2, taus, res_pargs=None, *argv, **kwargs): # plt.show() -###########################3 +# 3 # @@ -949,7 +943,7 @@ def get_xsvs_fit(spe_cts_all, K_mean, varyK=True, max_bins=None, qth=None, g2=No full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -964,7 +958,7 @@ def get_xsvs_fit(spe_cts_all, K_mean, varyK=True, max_bins=None, qth=None, g2=No ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) @@ -1115,7 +1109,6 @@ def plot_g2_contrast(contrast_factorL, g2, times, taus, q_ring_center=None, uid= range_ = range(qth, qth + 1) else: range_ = range(nq) - num_times = nt nr = len(range_) sx = int(round(np.sqrt(nr))) if nr % sx == 0: diff --git a/pyCHX/chx_specklecp.py b/pyCHX/chx_specklecp.py index d03ea3b..9bf097c 100644 --- a/pyCHX/chx_specklecp.py +++ b/pyCHX/chx_specklecp.py @@ -8,31 +8,23 @@ from __future__ import absolute_import, division, print_function import logging -import time -import six from skbeam.core import roi from skbeam.core.utils import bin_edges_to_centers, geometric_series logger = logging.getLogger(__name__) -import itertools import os -import sys from datetime import datetime from multiprocessing import Pool -import dill -import matplotlib as mpl import matplotlib.pyplot as plt import numpy as np -import scipy as sp import scipy.stats as st -from matplotlib.colors import LogNorm -from scipy.optimize import leastsq, minimize +from scipy.optimize import leastsq from tqdm import tqdm -from pyCHX.chx_compress import apply_async, go_through_FD, map_async, pass_FD, run_dill_encoded +from pyCHX.chx_compress import apply_async, pass_FD from pyCHX.chx_generic_functions import trans_data_to_pd @@ -237,8 +229,7 @@ def xsvsp_single( ) print("Histogram calculation DONE!") - del results - del res + return bin_edges, prob_k, prob_k_std_dev, his_sum @@ -555,7 +546,7 @@ def xsvsc_single( processing = 0 # print( level ) # prob_k_std_dev = np.power((prob_k_pow - - # np.power(prob_k, 2)), .5) + # np.power(prob_k, 2)), .5) for i in range(num_times): for j in range(num_roi): @@ -566,8 +557,8 @@ def xsvsc_single( prob_k[i, j] = prob_k[i, j] / his_sum[i, j] # for i in range(num_times): - # if isinstance(prob_k[i,0], float ) or isinstance(prob_k[i,0], int ): - # pass + # if isinstance(prob_k[i,0], float ) or isinstance(prob_k[i,0], int ): + # pass return bin_edges, prob_k, prob_k_std_dev, his_sum @@ -609,9 +600,9 @@ def _process( track_bad_level[level] += 1 # print (img_per_level,track_bad_level) u_labels = list(np.unique(labels)) - ############## - ##To Do list here, change histogram to bincount - ##Change error bar calculation + # + # To Do list here, change histogram to 
bincount + # Change error bar calculation if not (np.isnan(data).any()): for j, label in enumerate(u_labels): roi_data = data[labels == label] @@ -622,12 +613,12 @@ def _process( spe_hist = np.nan_to_num(spe_hist) # print( spe_hist.shape ) # prob_k[level, j] += (spe_hist - - # prob_k[level, j])/( img_per_level[level] - track_bad_level[level] ) + # prob_k[level, j])/( img_per_level[level] - track_bad_level[level] ) # print( prob_k[level, j] ) prob_k[level, j] += spe_hist # print( spe_hist.shape, prob_k[level, j] ) # prob_k_pow[level, j] += (np.power(spe_hist, 2) - - # prob_k_pow[level, j])/(img_per_level[level] - track_bad_level[level]) + # prob_k_pow[level, j])/(img_per_level[level] - track_bad_level[level]) def normalize_bin_edges(num_times, num_rois, mean_roi, max_cts): @@ -871,15 +862,14 @@ def get_bin_edges(num_times, num_rois, mean_roi, max_cts): return bin_edges, bin_centers, norm_bin_edges, norm_bin_centers -################# -##for fit -################### +# +# for fit +# -from scipy import stats from scipy.special import gamma, gammaln -###########################3 -##Dev at Nov 18, 2016 +# 3 +# Dev at Nov 18, 2016 # @@ -929,8 +919,8 @@ def nbinomres(p, hist, x, hist_err=None, N=1): return err -########### -##Dev at Octo 12, 2017 +# +# Dev at Octo 12, 2017 def nbinom(p, x, mu): @@ -1078,7 +1068,7 @@ def get_xsvs_fit( full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(kmean_guess) # resultL[0][0] ) + KL_val[i].append(kmean_guess) # resultL[0][0] ) else: # vary M and K fit_func = nbinomlog @@ -1093,7 +1083,7 @@ def get_xsvs_fit( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) @@ -1126,9 +1116,9 @@ def plot_xsvs_fit( """ # if qth is None: - # fig = plt.figure(figsize=(10,12)) + # fig = plt.figure(figsize=(10,12)) # else: - # fig = plt.figure(figsize=(8,8)) + # fig = plt.figure(figsize=(8,8)) max_cts = spe_cts_all[0][0].shape[0] - 1 num_times, num_rings = spe_cts_all.shape @@ -1381,7 +1371,6 @@ def plot_g2_contrast( range_ = range(qth, qth + 1) else: range_ = range(nq) - num_times = nt nr = len(range_) sx = int(round(np.sqrt(nr))) if nr % sx == 0: @@ -1491,7 +1480,7 @@ def get_xsvs_fit_old(spe_cts_all, K_mean, varyK=True, qth=None, max_bins=2, g2=N full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -1506,13 +1495,13 @@ def get_xsvs_fit_old(spe_cts_all, K_mean, varyK=True, qth=None, max_bins=2, g2=N ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) # if max_bins==2: - # ML_val = np.array( [ML_val[k][0] for k in sorted(list(ML_val.keys()))] ) - # KL_val = np.array( [KL_val[k][0] for k in sorted(list(KL_val.keys()))] ) + # ML_val = np.array( [ML_val[k][0] for k in sorted(list(ML_val.keys()))] ) + # KL_val = np.array( [KL_val[k][0] for k in sorted(list(KL_val.keys()))] ) return ML_val, KL_val, np.array(K_) @@ -1601,7 +1590,7 @@ def nbinom_dist(bin_values, K, M): return nbinom -#########poisson +# poisson def poisson(x, K): """Poisson distribution function. 
K is average photon counts @@ -1731,7 +1720,6 @@ def fit_xsvs1( """ from lmfit import Model - from scipy.interpolate import UnivariateSpline if func == "bn": mod = Model(nbinom_dist) @@ -1804,7 +1792,7 @@ def fit_xsvs1( axes.set_xlabel("K/") axes.set_ylabel("P(K)") - # Using the best K and M values interpolate and get more values for fitting curve + # Using the best K and M values interpolate and get more values for fitting curve fitx_ = np.linspace(0, max(Knorm_bin_edges[j, i][:-1]), 1000) fitx = np.linspace(0, max(bin_edges[j, i][:-1]), 1000) if func == "bn": @@ -2009,7 +1997,7 @@ def get_xsvs_fit_old1( ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -2028,7 +2016,7 @@ def get_xsvs_fit_old1( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) diff --git a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py index 2c9b9e3..c1e3fea 100644 --- a/pyCHX/chx_xpcs_xsvs_jupyter_V1.py +++ b/pyCHX/chx_xpcs_xsvs_jupyter_V1.py @@ -6,7 +6,95 @@ from IPython import get_ipython from pyCHX.chx_libs import colors, markers -from pyCHX.chx_packages import * +from pyCHX.chx_packages import ( + Attachment, + Get_Pixel_Arrayc, + Multifile, + T, + acquisition_period, + apply_mask, + auto_two_Arrayc, + bp, + cal_1d_qr, + cal_each_ring_mean_intensityc, + cal_g2p, + cal_waterfallc, + check_lost_metadata, + check_ROI_intensity, + cmap_albula, + combine_images, + compress_eigerdata, + create_time_slice, + db, + export_xpcs_results_to_h5, + extract_xpcs_results_from_h5, + fit_form_factor, + get_avg_img, + get_avg_imgc, + get_bad_frame_list, + get_binned_his_std, + get_circular_average, + get_contrast, + get_four_time_from_two_time, + get_g2_fit_general, + get_max_countc, + get_meta_data, + get_multi_tau_lag_steps, + get_one_time_from_two_time, + get_pixelist_interp_iq, + get_q_rate_fit_general, + get_qzr_map, + get_sid_filenames, + get_t_iqc, + get_t_qrc, + get_xsvs_fit, + getpass, + load_data, + load_mask, + make_pdf_report, + mask_exclude_badpixel, + multi_tau_lags, + np, + os, + plot1D, + plot_circular_average, + plot_each_ring_mean_intensityc, + plot_g2_contrast, + plot_g2_general, + plot_q_rate_fit_general, + plot_qIq_with_ROI, + plot_qr_1d_with_ROI, + plot_qrt_pds, + plot_qzr_map, + plot_t_iqc, + plot_waterfallc, + plot_xsvs_fit, + plt, + print_dict, + psave_obj, + random, + ro_mask, + roi, + run_time, + save_arrays, + save_bin_his_std, + save_dict_csv, + save_g2_fit_para_tocsv, + save_g2_general, + save_KM, + save_lists, + show_C12, + show_img, + show_qzr_roi, + show_ROI_on_image, + show_saxs_qmap, + time, + total_res, + update_olog_uid, + validate_uid, + warnings, + xsvsp, +) ip = get_ipython() ip.run_line_magic( @@ -33,7 +121,6 @@ def get_t_iqc_uids(uid_list, setup_pargs, slice_num=10, slice_width=1): iqsts = {} tstamp = {} qs = {} - label = [] for uid in uid_list: md = get_meta_data(uid) luid = md["uid"] @@ -382,9 +469,9 @@ def plot_entries_from_uids( return fig, ax -#################################################################################################### -##For real time analysis## -################################################################################################# +# +# For real time analysis# +# def get_iq_from_uids(uids, mask, setup_pargs): 
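The hunks that follow replace bare `except:` clauses with `except Exception:` (flake8 E722). A bare `except:` also traps `KeyboardInterrupt` and `SystemExit`, which subclass `BaseException`, so Ctrl-C can be silently swallowed in long polling loops such as `wait_data_acquistion_finish` below. A minimal sketch of the safer pattern; the retry loop and `flaky_read` helper are hypothetical stand-ins, not part of pyCHX:

import time

def flaky_read():
    # Hypothetical stand-in for a metadata lookup that is not ready yet.
    raise IOError("data not ready")

for attempt in range(3):
    try:
        flaky_read()
    except Exception as err:  # E722 fix: catches ordinary errors only
        # A bare `except:` would also trap KeyboardInterrupt/SystemExit,
        # making the loop impossible to interrupt from the keyboard.
        print("attempt %s failed: %s" % (attempt, err))
        time.sleep(0.1)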
@@ -501,7 +588,7 @@ def wait_data_acquistion_finish(uid, wait_time=2, max_try_num=3): FINISH = True print("The data acquistion finished.") print("Starting to do something here...") - except: + except Exception: wait_func(wait_time=wait_time) w += 1 print("Try number: %s" % w) @@ -593,7 +680,7 @@ def do_compress_on_line(start_time, stop_time, mask_dict=None, mask=None, wait_t ) update_olog_uid(uid=md["uid"], text="Data are on-line sparsified!", attachments=None) - except: + except Exception: print("There are something wrong with this data: %s..." % uid) print("*" * 50) return time.time() - t0 @@ -632,14 +719,14 @@ def realtime_xpcs_analysis( if finish: try: md = get_meta_data(uid) - ##corect some metadata + # corect some metadata if md_update is not None: md.update(md_update) # if 'username' in list(md.keys()): # try: - # md_cor['username'] = md_update['username'] - # except: - # md_cor = None + # md_cor['username'] = md_update['username'] + # except Exception: + # md_cor = None # uid = uid[:8] # print(md_cor) if not emulation: @@ -648,7 +735,7 @@ def realtime_xpcs_analysis( uid, run_pargs=run_pargs, md_cor=None, return_res=False, clear_plot=clear_plot ) # update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) - except: + except Exception: print("There are something wrong with this data: %s..." % uid) else: print("\nThis is not a XPCS series. We will simiply ignore it.") @@ -660,9 +747,9 @@ def realtime_xpcs_analysis( return time.time() - t0 -#################################################################################################### -##compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress## -################################################################################################# +# +# compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress# +# def compress_multi_uids( uids, mask, @@ -734,9 +821,9 @@ def compress_multi_uids( print("Done!") -#################################################################################################### -##get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid ## -################################################################################################# +# +# get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid # +# def get_two_time_mulit_uids( @@ -801,7 +888,7 @@ def get_two_time_mulit_uids( data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() g12b = auto_two_Arrayc(data_pixel, roi_mask, index=None) np.save(filename, g12b) - del g12b + print("The two time correlation function for uid={} is saved as {}.".format(uid, filename)) @@ -946,17 +1033,17 @@ def get_series_one_time_mulit_uids( try: g2_path = path + uid + "/" g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) - except: + except Exception: g2_path = path + md["uid"] + "/" g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) try: exp_time = float(md["cam_acquire_time"]) # *1000 #from second to ms - except: + except Exception: exp_time = float(md["exposure time"]) # * 1000 #from second to ms if trans is None: try: transi = md["transmission"] - except: + except Exception: transi = [1] else: transi = trans[i] @@ -1111,7 +1198,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_fit_form = False, run_waterfall = True,#False, run_t_ROI_Inten = True, - #run_fit_g2 = True, + # run_fit_g2 = True, fit_g2_func = 'stretched', run_one_time = True,#False, run_two_time = 
True,#False, @@ -1131,8 +1218,8 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= num_rings = 12, gap_ring_number = 6, number_rings= 1, - #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 - #width = 0.0002 + # qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + # width = 0.0002 qth_interest = 1, #the intested single qth use_sqnorm = False, use_imgsum_norm = True, @@ -1159,21 +1246,21 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_xsvs = run_pargs["run_xsvs"] try: run_dose = run_pargs["run_dose"] - except: + except Exception: run_dose = False - ############################################################### + # if scat_geometry == "gi_saxs": # to be done for other types run_xsvs = False - ############################################################### + # - ############################################################### + # if scat_geometry == "ang_saxs": run_xsvs = False run_waterfall = False run_two_time = False run_four_time = False run_t_ROI_Inten = False - ############################################################### + # if "bin_frame" in list(run_pargs.keys()): bin_frame = run_pargs["bin_frame"] bin_frame_number = run_pargs["bin_frame_number"] @@ -1191,12 +1278,12 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= use_imgsum_norm = run_pargs["use_imgsum_norm"] try: use_sqnorm = run_pargs["use_sqnorm"] - except: + except Exception: use_sqnorm = False try: inc_x0 = run_pargs["inc_x0"] inc_y0 = run_pargs["inc_y0"] - except: + except Exception: inc_x0 = None inc_y0 = None @@ -1227,14 +1314,13 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= g12b = None taus4 = None g4 = None - times_xsv = None contrast_factorL = None qth_interest = run_pargs["qth_interest"] pdf_version = run_pargs["pdf_version"] try: username = run_pargs["username"] - except: + except Exception: username = getpass.getuser() data_dir0 = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") @@ -1301,7 +1387,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= "beam_center_y", ], ) - ## Overwrite Some Metadata if Wrong Input + # Overwrite Some Metadata if Wrong Input dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata( md, Nimg, inc_x0=inc_x0, inc_y0=inc_y0, pixelsize=7.5 * 10 * (-5) ) @@ -1327,7 +1413,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= if md["detector"] == "eiger4m_single_image": mask[:, 2069] = 0 # False #Concluded from the previous results show_img(mask, image_name=uidstr + "_mask", save=True, path=data_dir) - mask_load = mask.copy() + mask.copy() imgsa = apply_mask(imgs, mask) img_choice_N = 2 @@ -1401,7 +1487,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= mask = mask * Chip_Mask # %system free && sync && echo 3 > /proc/sys/vm/drop_caches && free - ## Get bad frame list by a polynominal fit + # Get bad frame list by a polynominal fit bad_frame_list = get_bad_frame_list( imgsum, fit=True, @@ -1414,7 +1500,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= ) print("The bad frame list length is: %s" % len(bad_frame_list)) - ### Creat new mask by masking the bad pixels and get new avg_img + # Creat new mask by masking the bad pixels and get new avg_img if False: mask = mask_exclude_badpixel(bp, mask, md["uid"]) avg_img = get_avg_imgc(FD, sampling=1, 
bad_frame_list=bad_frame_list) @@ -1446,17 +1532,17 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= path=data_dir, ) - ############for SAXS and ANG_SAXS (Flow_SAXS) + # for SAXS and ANG_SAXS (Flow_SAXS) if scat_geometry == "saxs" or scat_geometry == "ang_saxs": # show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, - # image_name= uidstr + '_img_avg', save=True) + # image_name= uidstr + '_img_avg', save=True) # np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) # try: - # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) - # except: - # hmask=1 + # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) + # except Exception: + # hmask=1 hmask = 1 qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img * Chip_Mask, mask * hmask * Chip_Mask, pargs=setup_pargs, save=True @@ -1472,7 +1558,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= ) # pd = trans_data_to_pd( np.where( hmask !=1), - # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') + # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') # pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) @@ -1570,9 +1656,9 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= qrt_pds = get_t_qrc(FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid=uidstr) plot_qrt_pds(qrt_pds, time_edge, qz_index=0, uid=uidstr, path=data_dir) - ############################## - ##the below works for all the geometries - ######################################## + # + # the below works for all the geometries + # if scat_geometry != "ang_saxs": roi_inten = check_ROI_intensity( avg_img, roi_mask, ring_number=qth_interest, uid=uidstr, save=True, path=data_dir @@ -1591,7 +1677,6 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= path=data_dir, beg=FD.beg, ) - ring_avg = None if run_t_ROI_Inten: times_roi, mean_int_sets = cal_each_ring_mean_intensityc( @@ -1671,7 +1756,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= # if run_one_time: # plot_g2_general( g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, - # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') + # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') plot_g2_general( g2_dict={1: g2, 2: g2_fit}, @@ -1986,7 +2071,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= N = len(imgs) try: tr = md["transmission"] - except: + except Exception: tr = 1 if "dose_frame" in list(run_pargs.keys()): dose_frame = run_pargs["dose_frame"] @@ -2030,11 +2115,11 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= nopr = np.bincount(qind, minlength=(noqs + 1))[1:] # time_steps = np.array( utils.geometric_series(2, len(imgs) ) ) time_steps = [0, 1] # only run the first two levels - num_times = len(time_steps) + len(time_steps) times_xsvs = exposuretime + (2 ** (np.arange(len(time_steps))) - 1) * timeperframe print("The max counts are: %s" % max_cts) - ### Do historam + # Do historam if roi_avg is None: times_roi, mean_int_sets = cal_each_ring_mean_intensityc( FD, @@ -2121,7 +2206,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= path=data_dir, ) - ### Get contrast + # Get contrast contrast_factorL = 
get_contrast(ML_val) spec_km_pds = save_KM( spec_kmean, KL_val, ML_val, qs=qr, level_time=times_xsvs, uid=uid_, path=data_dir @@ -2331,7 +2416,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_dose=run_dose, report_type=scat_geometry, ) - ## Attach the PDF report to Olog + # Attach the PDF report to Olog if att_pdf_report: os.environ["HTTPS_PROXY"] = "https://proxy:8888" os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" @@ -2339,7 +2424,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= atch = [Attachment(open(pname, "rb"))] try: update_olog_uid(uid=md["uid"], text="Add XPCS Analysis PDF Report", attachments=atch) - except: + except Exception: print( "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." % pname @@ -2348,7 +2433,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= if show_plot: plt.show() # else: - # plt.close('all') + # plt.close('all') if clear_plot: plt.close("all") if return_res: diff --git a/pyCHX/movie_maker.py b/pyCHX/movie_maker.py index 0d42cf9..cac22a4 100644 --- a/pyCHX/movie_maker.py +++ b/pyCHX/movie_maker.py @@ -1,6 +1,6 @@ -################################ -######Movie_maker############### -################################ +# +# Movie_maker# +# def read_imgs(inDir): @@ -33,7 +33,7 @@ def select_regoin( try: img_[ys:ye, xs:xe] = True - except: + except Exception: img_[ys:ye, xs:xe, :] = True pixellist_ = np.where(img_.ravel())[0] # pixellist_ = img_.ravel() @@ -49,7 +49,7 @@ def select_regoin( else: try: imgx = img[ys:ye, xs:xe] - except: + except Exception: imgx = img[ys:ye, xs:xe, :] return imgx @@ -59,7 +59,6 @@ def save_png_series( imgs, ROI=None, logs=True, outDir=None, uid=None, vmin=None, vmax=None, cmap="viridis", dpi=100 ): import matplotlib.pyplot as plt - import numpy as np from matplotlib.colors import LogNorm """ @@ -81,7 +80,7 @@ def save_png_series( save png files """ - if uid == None: + if uid is None: uid = "uid" num_frame = 0 for img in imgs: @@ -91,7 +90,6 @@ def save_png_series( ax.get_yaxis().set_visible(False) if ROI is None: i0 = img - asp = 1.0 else: i0 = select_regoin( img, @@ -99,7 +97,7 @@ def save_png_series( keep_shape=False, ) xs, xe, ys, ye = ROI - asp = (ye - ys) / float(xe - xs) + (ye - ys) / float(xe - xs) ax.set_aspect("equal") if not logs: @@ -135,7 +133,6 @@ def movie_maker( ): import matplotlib.animation as animation import matplotlib.pyplot as plt - import numpy as np from matplotlib.colors import LogNorm """ @@ -172,8 +169,8 @@ def movie_maker( Returns ------- - #ani : - # movie + # ani : + # movie """ @@ -185,7 +182,6 @@ def movie_maker( if ROI is None: i0 = imgs[0] - asp = 1.0 else: i0 = select_regoin( @@ -194,7 +190,7 @@ def movie_maker( keep_shape=False, ) xs, xe, ys, ye = ROI - asp = (ye - ys) / float(xe - xs) + (ye - ys) / float(xe - xs) ax.set_aspect("equal") # print( cmap, vmin, vmax ) diff --git a/pyCHX/v2/_commonspeckle/DEVs.py b/pyCHX/v2/_commonspeckle/DEVs.py index 19fd4e5..ea6dc2a 100644 --- a/pyCHX/v2/_commonspeckle/DEVs.py +++ b/pyCHX/v2/_commonspeckle/DEVs.py @@ -76,7 +76,7 @@ def plot_xy_with_fit( return ax -#############For APD detector +# For APD detector def get_pix_g2_fft(time_inten): """YG Dev@CHX 2018/12/4 get g2 for oneD intensity g2 = G/(P*F) @@ -134,7 +134,7 @@ def get_pix_g2_PF(time_inten): return P, F -################### +# def get_ab_correlation(a, b): @@ -207,9 +207,9 @@ def auto_correlation_fft_padding_zeros(a, axis=-1): Based on 
auto_cor(arr) = ifft( fft( arr ) * fft(arr[::-1]) ) In numpy form auto_cor(arr) = ifft( - fft( arr, n=2N-1, axis=axis ) ##padding enough zeros - ## for axis - * np.conjugate( ## conju for reverse array + fft( arr, n=2N-1, axis=axis ) #padding enough zeros + # for axis + * np.conjugate( # conju for reverse array fft(arr , n=2N-1, axis=axis) ) ) #do reverse fft Input: @@ -251,9 +251,9 @@ def auto_correlation_fft(a, axis=-1): Based on auto_cor(arr) = ifft( fft( arr ) * fft(arr[::-1]) ) In numpy form auto_cor(arr) = ifft( - fft( arr, n=2N-1, axis=axis ) ##padding enough zeros - ## for axis - * np.conjugate( ## conju for reverse array + fft( arr, n=2N-1, axis=axis ) #padding enough zeros + # for axis + * np.conjugate( # conju for reverse array fft(arr , n=2N-1, axis=axis) ) ) #do reverse fft Input: @@ -286,7 +286,7 @@ def multitau(Ipix, bind, lvl=12, nobuf=8): plot(tt[1:],g2[1:,i]) will plot each g2. """ # if num_lev is None: - # num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + # num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 # print(nobuf,nolvl) nobins = bind.max() + 1 nobufov2 = nobuf // 2 @@ -345,7 +345,7 @@ def average_array_withNan(array, axis=0, mask=None): array_ = np.ma.masked_array(array, mask=mask) try: sums = np.array(np.ma.sum(array_[:, :], axis=axis)) - except: + except Exception: sums = np.array(np.ma.sum(array_[:], axis=axis)) cts = np.sum(~mask, axis=axis) @@ -412,8 +412,8 @@ def autocor_for_pix_time(pix_time_data, dly_dict, pixel_norm=None, frame_norm=No # IF_mask = mask_pix[tau: Nt,: ] # IPF_mask = IP_mask | IF_mask # IPFm = average_array_withNan(IP*IF, axis = 0, )#mask= IPF_mask ) - # IPm = average_array_withNan(IP, axis = 0, )# mask= IP_mask ) - # IFm = average_array_withNan(IF, axis = 0 , )# mask= IF_mask ) + # IPm = average_array_withNan(IP, axis = 0, )# mask= IP_mask ) + # IFm = average_array_withNan(IF, axis = 0 , )# mask= IF_mask ) G2[tau_ind] = average_array_withNan( IP * IF, axis=0, @@ -428,8 +428,8 @@ def autocor_for_pix_time(pix_time_data, dly_dict, pixel_norm=None, frame_norm=No ) # IFm tau_ind += 1 # for i in range(G2.shape[0]-1, 0, -1): - # if np.isnan(G2[i,0]): - # gmax = i + # if np.isnan(G2[i,0]): + # gmax = i gmax = tau_ind return G2[:gmax, :], Gp[:gmax, :], Gf[:gmax, :] @@ -447,7 +447,7 @@ def autocor_xytframe(self, n): return crl / (IP * IF) * FN -###################For Fit +# For Fit import matplotlib.pyplot as plt import numpy as np @@ -493,12 +493,12 @@ def _residuals(p, x, y, sigy, pall, adj, fun): def fitpr(chisq, a, sigmaa, title=None, lbl=None): """nicely print out results of a fit""" # get fitted results. - if lbl == None: + if lbl is None: lbl = [] for i in xrange(a.size): lbl.append("A%(#)02d" % {"#": i}) # print resuls of a fit. 
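# A standalone sketch (assuming 1-D real input) of the zero-padded FFT
# autocorrelation described in the auto_correlation_fft* docstrings above:
# padding to n = 2N-1 makes the circular correlation equal the linear one.
import numpy as np

def autocor_fft(a):
    n = len(a)
    f = np.fft.fft(a, n=2 * n - 1)
    # ifft(F * conj(F)) is the circular autocorrelation; with 2N-1 padding
    # its first N samples are the linear lags 0..N-1.
    return np.fft.ifft(f * np.conjugate(f)).real[:n]

x = np.random.rand(64)
direct = np.array([np.dot(x[: 64 - k], x[k:]) for k in range(64)])
assert np.allclose(autocor_fft(x), direct)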
- if title != None: + if title is not None: print(title) print(" chisq=%(c).4f" % {"c": chisq}) for i in range(a.size): @@ -529,7 +529,7 @@ def Gaussian(x, p): return g -###########For ellipse shaped sectors by users +# For ellipse shaped sectors by users def elps_r(a, b, theta): """ Returns the radius of an ellipse with semimajor/minor axes a/b diff --git a/pyCHX/v2/_commonspeckle/DataGonio.py b/pyCHX/v2/_commonspeckle/DataGonio.py index 686e7f0..2c34785 100644 --- a/pyCHX/v2/_commonspeckle/DataGonio.py +++ b/pyCHX/v2/_commonspeckle/DataGonio.py @@ -232,7 +232,7 @@ def convert_Qmap_old(img, qx_map, qy_map=None, bins=None, rangeq=None): # Mask -################################################################################ +# class Mask(object): """Stores the matrix of pixels to be excluded from further analysis.""" @@ -301,11 +301,11 @@ def invert(self): self.data = -1 * (self.data - 1) # End class Mask(object) - ######################################## + # # Calibration -################################################################################ +# class Calibration(object): """Stores aspects of the experimental setup; especially the calibration parameters for a particular detector. That is, the wavelength, detector @@ -326,7 +326,7 @@ def __init__(self, wavelength_A=None, distance_m=None, pixel_size_um=None): self.clear_maps() # Experimental parameters - ######################################## + # def set_wavelength(self, wavelength_A): """Set the experimental x-ray wavelength (in Angstroms).""" @@ -422,7 +422,7 @@ def get_q_per_pixel(self): return self.q_per_pixel # Maps - ######################################## + # def clear_maps(self): self.r_map_data = None @@ -544,11 +544,11 @@ def _generate_qxyz_maps(self): ) # End class Calibration(object) - ######################################## + # # CalibrationGonio -################################################################################ +# class CalibrationGonio(Calibration): """ The geometric claculations used here are described: @@ -557,7 +557,7 @@ class CalibrationGonio(Calibration): """ # Experimental parameters - ######################################## + # def set_angles( self, @@ -673,7 +673,7 @@ def get_ratioDw(self): return self.distance_m / (width_mm / 1000.0) # Maps - ######################################## + # def q_map(self): if self.q_map_data is None: diff --git a/pyCHX/v2/_commonspeckle/SAXS.py b/pyCHX/v2/_commonspeckle/SAXS.py index afdfe17..daee676 100644 --- a/pyCHX/v2/_commonspeckle/SAXS.py +++ b/pyCHX/v2/_commonspeckle/SAXS.py @@ -133,7 +133,7 @@ def find_index_old(x, x0, tolerance=None): N = len(x) i = 0 position = None - if tolerance == None: + if tolerance is None: tolerance = (x[1] - x[0]) / 2.0 if x0 > max(x): position = len(x) - 1 @@ -489,7 +489,7 @@ def get_form_factor_fit2( # print(q4_bg) # resL = leastsq( fit_funcs, [ p ], args=( iq_, q_, num_points, spread, fit_func, function ), - # full_output=1, ftol=1.49012e-38, xtol=1.49012e-10, factor=100) + # full_output=1, ftol=1.49012e-38, xtol=1.49012e-10, factor=100) # radius, sigma, delta_rho, background = np.abs(pfit) if not q4_bg: @@ -531,7 +531,7 @@ def get_form_factor_fit2( for i in range(len(pfit)): try: error.append(np.absolute(pcov[i][i]) ** 0.5) - except: + except Exception: error.append(None) pfit_leastsq = pfit perr_leastsq = np.array(error) @@ -944,7 +944,7 @@ def show_saxs_qmap( if w < minW: img_ = img[cx - w // 2 : cx + w // 2, cy + w // 2 : cy + w // 2] # elif w > maxW: - # img_[ cx-w//2:cx+w//2, cy+w//2:cy+w//2 ] = + # img_[ 
cx-w//2:cx+w//2, cy+w//2:cy+w//2 ] = ROI = [ max(0, center[0] - w), @@ -1013,12 +1013,12 @@ def show_saxs_qmap( return ax -######################## -##Fit sphere by scipy.leastsq fit +# +# Fit sphere by scipy.leastsq fit def fit_sphere_form_factor_func(parameters, ydata, xdata, yerror=None, nonvariables=None): - """##Develop by YG at July 28, 2017 @CHX + """#Develop by YG at July 28, 2017 @CHX This function is for fitting form factor of polyderse spherical particles by using scipy.leastsq fit radius, sigma, delta_rho, background = parameters @@ -1044,7 +1044,7 @@ def fit_sphere_form_factor_by_leastsq( pq, fit_range=None, ): - """##Develop by YG at July 28, 2017 @CHX + """#Develop by YG at July 28, 2017 @CHX Fitting form factor of polyderse spherical particles by using scipy.leastsq fit Input: radius, sigma, delta_rho, background = p0 @@ -1070,7 +1070,7 @@ def fit_sphere_form_factor_by_leastsq( def plot_fit_sphere_form_factor(q, pq, res, p0=None, xlim=None, ylim=None): - """##Develop by YG at July 28, 2017 @CHX""" + """#Develop by YG at July 28, 2017 @CHX""" if p0 is not None: radius, sigma, delta_rho, background = p0 diff --git a/pyCHX/v2/_commonspeckle/Stitching.py b/pyCHX/v2/_commonspeckle/Stitching.py index 8658290..ba3110e 100644 --- a/pyCHX/v2/_commonspeckle/Stitching.py +++ b/pyCHX/v2/_commonspeckle/Stitching.py @@ -95,7 +95,7 @@ def Correct_Overlap_Images_Intensities( fig = plt.figure()# figsize=[2,8]) for i in range(len(infiles)): - #print(i) + # print(i) ax = fig.add_subplot(1,8, i+1) d = process.load( infiles[i] ) show_img( dataM[i], logs = True, show_colorbar= False,show_ticks =False, @@ -188,14 +188,14 @@ def stitch_WAXS_in_Qspace(dataM, phis, calibration, dx=0, dy=22, dz=0, dq=0.015, phi_offset=4.649, phi_start=1.0, phi_spacing=5.0,) for infile in infiles] ) # For TWD data calibration = CalibrationGonio(wavelength_A=0.619920987) # 20.0 keV - #calibration.set_image_size( data.shape[1], data.shape[0] ) + # calibration.set_image_size( data.shape[1], data.shape[0] ) calibration.set_image_size(195, height=1475) # Pilatus300kW vertical calibration.set_pixel_size(pixel_size_um=172.0) calibration.set_beam_position(97.0, 1314.0) calibration.set_distance(0.275) Intensity_map, qxs, qzs = stitch_WAXS_in_Qspace( dataM, phis, calibration) - #Get center of the qmap + # Get center of the qmap bx,by = np.argmin( np.abs(qxs) ), np.argmin( np.abs(qzs) ) print( bx, by ) @@ -329,7 +329,7 @@ def get_phi(filename, phi_offset=0, phi_start=4.5, phi_spacing=4.0, polarity=-1, return phi_c -############For CHX beamline +# For CHX beamline def get_qmap_qxyz_range( @@ -430,14 +430,14 @@ def stitch_WAXS_in_Qspace_CHX( phi_offset=4.649, phi_start=1.0, phi_spacing=5.0,) for infile in infiles] ) # For TWD data calibration = CalibrationGonio(wavelength_A=0.619920987) # 20.0 keV - #calibration.set_image_size( data.shape[1], data.shape[0] ) + # calibration.set_image_size( data.shape[1], data.shape[0] ) calibration.set_image_size(195, height=1475) # Pilatus300kW vertical calibration.set_pixel_size(pixel_size_um=172.0) calibration.set_beam_position(97.0, 1314.0) calibration.set_distance(0.275) Intensity_map, qxs, qzs = stitch_WAXS_in_Qspace( dataM, phis, calibration) - #Get center of the qmap + # Get center of the qmap bx,by = np.argmin( np.abs(qxs) ), np.argmin( np.abs(qzs) ) print( bx, by ) """ diff --git a/pyCHX/v2/_commonspeckle/Two_Time_Correlation_Function.py b/pyCHX/v2/_commonspeckle/Two_Time_Correlation_Function.py index 6d05898..e246e32 100644 --- a/pyCHX/v2/_commonspeckle/Two_Time_Correlation_Function.py 
+++ b/pyCHX/v2/_commonspeckle/Two_Time_Correlation_Function.py @@ -1,8 +1,8 @@ -###################################################################################### -########Dec 16, 2015, Yugang Zhang, yuzhang@bnl.gov, CHX, NSLS-II, BNL################ -########Time correlation function, include one-time, two-time, four-time############## -########Muli-tau method, array-operation method####################################### -###################################################################################### +# +# Dec 16, 2015, Yugang Zhang, yuzhang@bnl.gov, CHX, NSLS-II, BNL# +# Time correlation function, include one-time, two-time, four-time# +# Muli-tau method, array-operation method# +# import itertools @@ -71,7 +71,7 @@ def __init__(self, indexable, pixelist): # self.shape = indexable.shape try: self.length = len(indexable) - except: + except Exception: self.length = indexable.length def get_data(self): @@ -95,7 +95,7 @@ def __init__(self, indexable, mask): self.mask = mask try: self.shape = indexable.shape - except: + except Exception: # if self.shape = [len(indexable), indexable[0].shape[0], indexable[0].shape[1]] # self.shape = indexable.shape @@ -252,10 +252,10 @@ def auto_two_Array(data, rois, data_pixel=None): g12b[:, :, qi - 1] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] # print ( proi, int( qi //( Unitq) ) ) - # if int( qi //( Unitq) ) == proi: - # sys.stdout.write("#") - # sys.stdout.flush() - # proi += 1 + # if int( qi //( Unitq) ) == proi: + # sys.stdout.write("#") + # sys.stdout.flush() + # proi += 1 elapsed_time = time.time() - start_time print("Total time: %.2f min" % (elapsed_time / 60.0)) @@ -263,14 +263,14 @@ def auto_two_Array(data, rois, data_pixel=None): return g12b -#################################### -##Derivation of Two time correlation -##################################### +# +# Derivation of Two time correlation +# -##################################### +# # get one-time @different age -##################################### +# def get_qedge2(qstart, qend, qwidth, noqs, return_int=False): @@ -438,7 +438,7 @@ def get_aged_g2_from_g12q(g12q, age_edge, age_center=None, timeperframe=1, time_ arr = rotate_g12q_to_rectangle(g12q) m, n = arr.shape # m should be 2*n-1 # age_edge, age_center = get_qedge( qstart=slice_start,qend= slice_end, - # qwidth = slice_width, noqs =slice_num ) + # qwidth = slice_width, noqs =slice_num ) # print(arr.shape) age_edge = np.int_(age_edge) if age_center is None: @@ -739,7 +739,7 @@ def plot_aged_g2(g2_aged, tau=None, timeperframe=1, ylim=None, xlim=None): ax.set_ylim(xlim) -##################################### +# # get fout-time @@ -924,9 +924,9 @@ def histogram_taus(taus, hisbin=20, plot=True, timeperframe=1): return his -##################################### +# # get one-time -##################################### +# def get_one_time_from_two_time_old(g12, norms=None, nopr=None): @@ -1058,7 +1058,7 @@ def get_four_time_from_two_time(g12, g2=None, rois=None): return g4f12 -###### +# def make_g12_mask(badframes_list, g12_shape): """ Dec 16, 2015, Y.G.@CHX @@ -1275,7 +1275,7 @@ def show_C12( else: timeperframe = 1 - if "timeoffset" in kwargs.keys(): ### added timeoffset here + if "timeoffset" in kwargs.keys(): # added timeoffset here timeoffset = kwargs["timeoffset"] else: timeoffset = 0 @@ -1318,7 +1318,7 @@ def show_C12( fig, ax = fig_ax # extent=[0, data.shape[0]*timeperframe, 0, data.shape[0]*timeperframe ] - extent = np.array([N1, N2, N1, N2]) * timeperframe + timeoffset ### added timeoffset to 
extend + extent = np.array([N1, N2, N1, N2]) * timeperframe + timeoffset # added timeoffset to extend if logs: im = imshow( diff --git a/pyCHX/v2/_commonspeckle/XPCS_GiSAXS.py b/pyCHX/v2/_commonspeckle/XPCS_GiSAXS.py index 2bf9974..6fd85e6 100644 --- a/pyCHX/v2/_commonspeckle/XPCS_GiSAXS.py +++ b/pyCHX/v2/_commonspeckle/XPCS_GiSAXS.py @@ -87,8 +87,8 @@ def get_gisaxs_roi(Qr, Qz, qr_map, qz_map, mask=None, qval_dict=None): return roi_mask, qval_dict -############ -##developed at Octo 11, 2016 +# +# developed at Octo 11, 2016 def get_qr(data, Qr, Qz, qr, qz, mask=None): """Octo 12, 2016, Y.G.@CHX plot one-d of I(q) as a function of qr for different qz @@ -102,12 +102,12 @@ def get_qr(data, Qr, Qz, qr, qz, mask=None): Return: qr_1d, a dataframe, with columns as qr1, qz1 (float value), qr2, qz2,.... Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -159,9 +159,9 @@ def get_qr(data, Qr, Qz, qr, qz, mask=None): return df -######################## +# # get one-d of I(q) as a function of qr for different qz -##################### +# def cal_1d_qr( @@ -194,14 +194,14 @@ def cal_1d_qr( Plot 1D cureve as a function of Qr for each Qz Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -433,9 +433,9 @@ def plot_t_qrc(qr_1d, frame_edge, save=False, pargs=None, fontsize=8, *argv, **k ) -########################################## -###Functions for GiSAXS -########################################## +# +# Functions for GiSAXS +# def make_gisaxs_grid(qr_w=10, qz_w=12, dim_r=100, dim_z=120): @@ -453,9 +453,9 @@ def make_gisaxs_grid(qr_w=10, qz_w=12, dim_r=100, dim_z=120): return y -########################################### +# # for Q-map, convert pixel to Q -########################################### +# def convert_Qmap(img, qx_map, qy_map=None, bins=None, rangeq=None, mask=None, statistic="sum"): @@ -693,9 +693,9 @@ def get_qedge2( return qedge, qcenter -########################################### +# # for plot Q-map -########################################### +# def get_qmap_label(qmap, qedge): @@ -860,14 +860,14 @@ def get_1d_qr( Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -1084,14 +1084,14 @@ def get_qr_tick_label(qr, label_array_qr, inc_x0, interp=True): w = np.where(rticks <= inc_x0)[0] rticks1 = np.int_(np.interp(np.round(rticks_label[w], 3), rticks_label[w], rticks[w])) rticks_label1 = np.round(rticks_label[w], 3) - except: + except Exception: rticks_label1 = [] try: w = np.where(rticks > inc_x0)[0] rticks2 = 
np.int_(np.interp(np.round(rticks_label[w], 3), rticks_label[w], rticks[w])) rticks = np.append(rticks1, rticks2) rticks_label2 = np.round(rticks_label[w], 3) - except: + except Exception: rticks_label2 = [] rticks_label = np.append(rticks_label1, rticks_label2) @@ -1176,7 +1176,7 @@ def get_qzr_map(qr, qz, inc_x0, Nzline=10, Nrline=10, interp=True, return_qrz_la # rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0) try: rticks, rticks_label = zip(*np.sort(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) - except: + except Exception: rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) # stride = int(len(zticks)/10) ticks = [zticks, zticks_label, rticks, rticks_label] @@ -1352,7 +1352,7 @@ def show_qzr_map(qr, qz, inc_x0, data=None, Nzline=10, Nrline=10, interp=True, * # rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0) try: rticks, rticks_label = zip(*np.sort(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) - except: + except Exception: rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) # stride = int(len(zticks)/10) @@ -1821,7 +1821,7 @@ def save_gisaxs_g2(g2, res_pargs, time_label=False, taus=None, filename=None, *a try: qz_center = res_pargs["qz_center"] qr_center = res_pargs["qr_center"] - except: + except Exception: roi_label = res_pargs["roi_label"] path = res_pargs["path"] @@ -1835,7 +1835,7 @@ def save_gisaxs_g2(g2, res_pargs, time_label=False, taus=None, filename=None, *a for qz in qz_center: for qr in qr_center: columns.append([str(qz), str(qr)]) - except: + except Exception: columns.append([v for (k, v) in roi_label.items()]) df.columns = columns @@ -1914,10 +1914,10 @@ def fit_gisaxs_g2(g2, res_pargs, function="simple_exponential", one_plot=False, # uid=res_pargs['uid'] num_rings = g2.shape[1] - beta = np.zeros(num_rings) # contrast factor - rate = np.zeros(num_rings) # relaxation rate - alpha = np.zeros(num_rings) # alpha - baseline = np.zeros(num_rings) # baseline + beta = np.zeros(num_rings) # contrast factor + rate = np.zeros(num_rings) # relaxation rate + alpha = np.zeros(num_rings) # alpha + baseline = np.zeros(num_rings) # baseline if function == "simple_exponential" or function == "simple": _vars = np.unique(_vars + ["alpha"]) @@ -2141,7 +2141,7 @@ def fit_gisaxs_g2(g2, res_pargs, function="simple_exponential", one_plot=False, # GiSAXS End -############################### +# def get_each_box_mean_intensity(data_series, box_mask, sampling, timeperframe, plot_=True, *argv, **kwargs): @@ -2154,7 +2154,7 @@ def get_each_box_mean_intensity(data_series, box_mask, sampling, timeperframe, p mean_int_sets, index_list = roi.mean_intensity(np.array(data_series[::sampling]), box_mask) try: N = len(data_series) - except: + except Exception: N = data_series.length times = np.arange(N) * timeperframe # get the time for each frame num_rings = len(np.unique(box_mask)[1:]) @@ -2232,7 +2232,7 @@ def fit_qr_qz_rate(qr, qz, rate, plot_=True, *argv, **kwargs): for i, qz_ in enumerate(qz): try: y = np.array(rate["rate"][i * Nqr : (i + 1) * Nqr]) - except: + except Exception: y = np.array(rate[i * Nqr : (i + 1) * Nqr]) # print( len(x), len(y) ) @@ -2465,7 +2465,7 @@ def multi_uids_gisaxs_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2498,7 +2498,7 @@ def multi_uids_gisaxs_xpcs_analysis( 
md["Measurement"] = db[uid]["start"]["Measurement"] # md['sample']=db[uid]['start']['sample'] # print( md['Measurement'] ) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" @@ -2510,7 +2510,7 @@ def multi_uids_gisaxs_xpcs_analysis( acquisition_period = md["frame_time"] timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... setup_pargs = dict( uid=uid, dpix=dpix, Ldet=Ldet, lambda_=lambda_, timeperframe=timeperframe, path=data_dir ) diff --git a/pyCHX/v2/_commonspeckle/XPCS_SAXS.py b/pyCHX/v2/_commonspeckle/XPCS_SAXS.py index f400771..4966a97 100644 --- a/pyCHX/v2/_commonspeckle/XPCS_SAXS.py +++ b/pyCHX/v2/_commonspeckle/XPCS_SAXS.py @@ -392,15 +392,15 @@ def circular_average( image_mask = np.ravel(image) # if nx is None: #make a one-pixel width q - # nx = int( max_r - min_r) + # nx = int( max_r - min_r) # if min_x is None: - # min_x= int( np.min( binr)) - # min_x_= int( np.min( binr)/(np.sqrt(pixel_size[1]*pixel_size[0] ))) + # min_x= int( np.min( binr)) + # min_x_= int( np.min( binr)/(np.sqrt(pixel_size[1]*pixel_size[0] ))) # if max_x is None: - # max_x = int( np.max(binr )) - # max_x_ = int( np.max(binr)/(np.sqrt(pixel_size[1]*pixel_size[0] )) ) + # max_x = int( np.max(binr )) + # max_x_ = int( np.max(binr)/(np.sqrt(pixel_size[1]*pixel_size[0] )) ) # if nx is None: - # nx = max_x_ - min_x_ + # nx = max_x_ - min_x_ # binr_ = np.int_( binr /(np.sqrt(pixel_size[1]*pixel_size[0] )) ) binr_ = binr / (np.sqrt(pixel_size[1] * pixel_size[0])) @@ -467,7 +467,7 @@ def get_circular_average( avg_img, center, threshold=0, nx=nx, pixel_size=(dpix, dpix), mask=mask, min_x=min_x, max_x=max_x ) qp_ = qp * dpix - # convert bin_centers from r [um] to two_theta and then to q [1/px] (reciprocal space) + # convert bin_centers from r [um] to two_theta and then to q [1/px] (reciprocal space) two_theta = utils.radius_to_twotheta(Ldet, qp_) q = utils.twotheta_to_q(two_theta, lambda_) if plot_: @@ -1168,7 +1168,7 @@ def get_angular_mask( """ mask: 2D-array inner_angle # the starting angle in unit of degree - outer_angle # the ending angle in unit of degree + outer_angle # the ending angle in unit of degree width # width of each angle, in degree, default is None, there is no gap between the neighbour angle ROI edges: default, None. otherwise, give a customized angle edges num_angles # number of angles @@ -1254,7 +1254,7 @@ def get_angular_mask_old( """ mask: 2D-array inner_angle # the starting angle in unit of degree - outer_angle # the ending angle in unit of degree + outer_angle # the ending angle in unit of degree width # width of each angle, in degree, default is None, there is no gap between the neighbour angle ROI edges: default, None. 
otherwise, give a customized angle edges num_angles # number of angles @@ -1352,7 +1352,7 @@ def get_ring_mask( return_q_in_pixel=False, ): # def get_ring_mask( mask, inner_radius= 0.0020, outer_radius = 0.009, width = 0.0002, num_rings = 12, - # edges=None, unit='pixel',pargs=None ): + # edges=None, unit='pixel',pargs=None ): """ mask: 2D-array inner_radius #radius of the first ring @@ -1380,11 +1380,11 @@ def get_ring_mask( # qc = np.int_( np.linspace( inner_radius,outer_radius, num_rings ) ) # edges = np.zeros( [ len(qc), 2] ) # if width%2: - # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 +1 + # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 +1 # else: - # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 + # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 - # find the edges of the required rings + # find the edges of the required rings if edges is None: if num_rings != 1: spacing = (outer_radius - inner_radius - num_rings * width) / (num_rings - 1) # spacing between rings @@ -1714,9 +1714,9 @@ def plot_saxs_rad_ang_g2(g2, taus, res_pargs=None, master_angle_plot=False, retu # title_qa = '%.2f'%( ang_center[sn]) + r'$^\circ$' + '( %d )'%(i) # if num_qr==1: - # title = 'uid= %s:--->'%uid + title_qr + '__' + title_qa + # title = 'uid= %s:--->'%uid + title_qr + '__' + title_qa # else: - # title = title_qa + # title = title_qa title = title_qa ax.set_title(title, y=1.1, fontsize=12) y = g2[:, i] @@ -1744,9 +1744,9 @@ def plot_saxs_rad_ang_g2(g2, taus, res_pargs=None, master_angle_plot=False, retu return fig -############################################ -##a good func to fit g2 for all types of geogmetries -############################################ +# +# a good func to fit g2 for all types of geogmetries +# def fit_saxs_rad_ang_g2( @@ -1770,8 +1770,8 @@ def fit_saxs_rad_ang_g2( 'streched_exponential': fit by a streched exponential function, defined as beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline - #fit_vibration: - # if True, will fit the g2 by a dumped sin function due to beamline mechnical oscillation + # fit_vibration: + # if True, will fit the g2 by a dumped sin function due to beamline mechnical oscillation Returns ------- @@ -1815,14 +1815,14 @@ def fit_saxs_rad_ang_g2( print("Please give ang_center") num_rings = g2.shape[1] - beta = np.zeros(num_rings) # contrast factor - rate = np.zeros(num_rings) # relaxation rate - alpha = np.zeros(num_rings) # alpha - baseline = np.zeros(num_rings) # baseline + beta = np.zeros(num_rings) # contrast factor + rate = np.zeros(num_rings) # relaxation rate + alpha = np.zeros(num_rings) # alpha + baseline = np.zeros(num_rings) # baseline freq = np.zeros(num_rings) if function == "flow_para_function" or function == "flow_para": - flow = np.zeros(num_rings) # baseline + flow = np.zeros(num_rings) # baseline if "fit_variables" in kwargs: additional_var = kwargs["fit_variables"] _vars = [k for k in list(additional_var.keys()) if additional_var[k] is False] @@ -2130,7 +2130,7 @@ def multi_uids_saxs_flow_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector, reverse=True) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2180,7 +2180,7 @@ def multi_uids_saxs_flow_xpcs_analysis( # md['sample']= 'PS205000-PMMA-207000-SMMA3' print(md["Measurement"]) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" @@ -2191,7 +2191,7 @@ def multi_uids_saxs_flow_xpcs_analysis( acquisition_period = md["frame_time"] 
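# The try/except fallback chain above (count_time -> cam_acquire_time,
# frame_time -> 'acquire period') is the pattern this sweep converts from a
# bare `except:` to `except Exception:`: the narrower clause still catches a
# KeyError from a missing field but no longer swallows KeyboardInterrupt or
# SystemExit. A dict-based sketch; the function name is illustrative and the
# final .get() default stands in for the deeper databroker fallback:

def get_exposure_times(md):
    try:
        exposuretime = md["count_time"]          # exposure time in sec
    except Exception:
        exposuretime = md["cam_acquire_time"]
    try:
        acquisition_period = md["frame_time"]
    except Exception:
        acquisition_period = md.get("acquire period", exposuretime)
    return exposuretime, acquisition_period

# e.g. get_exposure_times({"count_time": 0.00134, "frame_time": 0.00134})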
timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... center = md["center"] setup_pargs = dict( @@ -2206,7 +2206,7 @@ def multi_uids_saxs_flow_xpcs_analysis( md["avg_img"] = avg_img # plot1D( y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])], - # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) + # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) min_inten = 10 # good_start = np.where( np.array(imgsum) > min_inten )[0][0] @@ -2223,7 +2223,7 @@ def multi_uids_saxs_flow_xpcs_analysis( print("The good_end frame number is: %s " % good_end_) norm = None - ################### + # # Do correlaton here for nconf, seg_mask in enumerate([seg_mask_v, seg_mask_p]): @@ -2450,7 +2450,7 @@ def multi_uids_saxs_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector, reverse=True) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2499,7 +2499,7 @@ def multi_uids_saxs_xpcs_analysis( # md['sample']= 'PS205000-PMMA-207000-SMMA3' print(md["Measurement"]) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" @@ -2510,7 +2510,7 @@ def multi_uids_saxs_xpcs_analysis( acquisition_period = md["frame_time"] timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... 
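# For reference, the stretched-exponential model quoted in the
# fit_saxs_rad_ang_g2 docstring above, written out as a standalone function
# (an illustrative sketch; beta, relaxation_rate, alpha and baseline are the
# per-ring quantities the fitter fills in):
import numpy as np

def g2_stretched_exponential(lags, beta, relaxation_rate, alpha=1.0, baseline=1.0):
    # g2(tau) = beta * (exp(-2 * relaxation_rate * tau))**alpha + baseline;
    # alpha = 1.0 reduces this to the 'simple_exponential' model.
    return beta * np.exp(-2.0 * relaxation_rate * lags) ** alpha + baseline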
center = md["center"] setup_pargs = dict( @@ -2525,7 +2525,7 @@ def multi_uids_saxs_xpcs_analysis( md["avg_img"] = avg_img # plot1D( y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])], - # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) + # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) min_inten = 10 # good_start = np.where( np.array(imgsum) > min_inten )[0][0] @@ -2672,7 +2672,7 @@ def plot_mul_g2(g2s, md): # print ( len_tau, len(y)) # ax.semilogx(taus[1:len_], y[1:len_], marker = '%s'%next(markers_), color='%s'%next(colors_), - # markersize=6, label = '%s'%sid) + # markersize=6, label = '%s'%sid) ax.semilogx( taus[1:len_], y[1:len_], marker=markers[i], color=colors[i], markersize=6, label="%s" % sid diff --git a/pyCHX/v2/_commonspeckle/XPCS_XSVS_SAXS_Multi_2017_V4.py b/pyCHX/v2/_commonspeckle/XPCS_XSVS_SAXS_Multi_2017_V4.py index aa327ae..c100e9e 100644 --- a/pyCHX/v2/_commonspeckle/XPCS_XSVS_SAXS_Multi_2017_V4.py +++ b/pyCHX/v2/_commonspeckle/XPCS_XSVS_SAXS_Multi_2017_V4.py @@ -26,10 +26,10 @@ def XPCS_XSVS_SAXS_Multi( run_two_time = run_pargs["run_two_time"] run_four_time = run_pargs["run_four_time"] run_xsvs = run_pargs["run_xsvs"] - ############################################################### + # if scat_geometry != "saxs": # to be done for other types run_xsvs = False - ############################################################### + # att_pdf_report = run_pargs["att_pdf_report"] show_plot = run_pargs["show_plot"] CYCLE = run_pargs["CYCLE"] @@ -68,7 +68,7 @@ def XPCS_XSVS_SAXS_Multi( data_dir_ = data_dir uid_ = uid_average - ### For Load results + # For Load results multi_res = {} for uid, fuid in zip(guids, fuids): @@ -528,7 +528,7 @@ def XPCS_XSVS_SAXS_Multi( export_xpcs_results_to_h5(uid + "_Res.h5", data_dir, export_dict=Exdt) # extract_dict = extract_xpcs_results_from_h5( filename = uid + '_Res.h5', import_dir = data_dir ) - ## Create PDF report for each uid + # Create PDF report for each uid pdf_out_dir = data_dir pdf_filename = "XPCS_Analysis_Report_for_%s%s.pdf" % (uid_average, pdf_version) if run_xsvs: @@ -551,7 +551,7 @@ def XPCS_XSVS_SAXS_Multi( run_xsvs, report_type=scat_geometry, ) - ### Attach each g2 result to the corresponding olog entry + # Attach each g2 result to the corresponding olog entry if att_pdf_report: os.environ["HTTPS_PROXY"] = "https://proxy:8888" os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" @@ -563,7 +563,7 @@ def XPCS_XSVS_SAXS_Multi( text="Add XPCS Averaged Analysis PDF Report", attachments=atch, ) - except: + except Exception: print( "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." 
% pname @@ -611,14 +611,14 @@ def XPCS_XSVS_SAXS_Multi( mask_path="/XF11ID/analysis/2016_3/masks/", mask_name="Nov28_4M_SAXS_mask.npy", good_start=5, - #####################################for saxs + # for saxs uniformq=True, inner_radius=0.005, # 0.005 for 50 nmAu/SiO2, 0.006, #for 10nm/coralpor outer_radius=0.04, # 0.04 for 50 nmAu/SiO2, 0.05, #for 10nm/coralpor num_rings=12, gap_ring_number=6, number_rings=1, - ############################for gi_saxs + # for gi_saxs # inc_x0 = 1473, # inc_y0 = 372, # refl_x0 = 1473, diff --git a/pyCHX/v2/_commonspeckle/__init__.py b/pyCHX/v2/_commonspeckle/__init__.py index a266959..3c3eaba 100644 --- a/pyCHX/v2/_commonspeckle/__init__.py +++ b/pyCHX/v2/_commonspeckle/__init__.py @@ -2,4 +2,4 @@ # # from ._version import get_versions # __version__ = get_versions()['version'] -# del get_versions +# diff --git a/pyCHX/v2/_commonspeckle/chx_Fitters2D.py b/pyCHX/v2/_commonspeckle/chx_Fitters2D.py index 852502e..b99c275 100644 --- a/pyCHX/v2/_commonspeckle/chx_Fitters2D.py +++ b/pyCHX/v2/_commonspeckle/chx_Fitters2D.py @@ -198,11 +198,11 @@ def __call__(self, XY, img, **kwargs): self.mod = Model(self.fitfunc, independent_vars=["XY"], param_names=self.params.keys()) # assumes first var is dependent var res = self.mod.fit(img.ravel(), XY=(XY[0].ravel(), XY[1].ravel()), params=params, **kwargs) - ## old version, only return values + # old version, only return values # add reduced chisq to parameter list # res.best_values['chisq']=res.redchi # return res.best_values - ## new version, also return the std + # new version, also return the std resf = {} ks = list(res.params.keys()) for var in ks: diff --git a/pyCHX/v2/_commonspeckle/chx_compress.py b/pyCHX/v2/_commonspeckle/chx_compress.py index f6c1bf3..00c7dd9 100644 --- a/pyCHX/v2/_commonspeckle/chx_compress.py +++ b/pyCHX/v2/_commonspeckle/chx_compress.py @@ -25,7 +25,7 @@ ) # from pyCHX.v2._commonspeckle.chx_libs import (np, roi, time, datetime, os, getpass, db, -# LogNorm, RUN_GUI) #common +# LogNorm, RUN_GUI) #common from pyCHX.v2._commonspeckle.chx_libs import RUN_GUI, LogNorm, datetime, getpass, np, os, roi, time # common # imports handler from CHX @@ -51,7 +51,7 @@ def pass_FD(FD, n): # FD.rdframe(n) try: FD.seekimg(n) - except: + except Exception: pass return False @@ -247,7 +247,7 @@ def read_compressed_eigerdata( else: try: mask, avg_img, imgsum, bad_frame_list_ = pkl.load(open(filename + ".pkl", "rb")) - except: + except Exception: CAL = True if CAL: FD = Multifile(filename, beg, end) @@ -385,8 +385,8 @@ def para_compress_eigerdata( print("No bad frames are involved.") print("Combining the seperated compressed files together...") combine_compressed(filename, Nf, del_old=True) - del results - del res_ + + if with_pickle: pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) if copy_rawdata: @@ -592,14 +592,14 @@ def segment_compress_eigerdata( fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) else: fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) # n +=1 - del p, v, img + fp.flush() fp.close() avg_img /= good_count bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) sys.stdout.write("#") sys.stdout.flush() - # del images, mask, avg_img, imgsum, bad_frame_list + # # print( 'Should release memory here') return mask, avg_img, imgsum, bad_frame_list @@ -913,7 +913,7 @@ def __init__(self, filename, beg, end, reverse=False): NOTE: At each record n, the file cursor points to record n+1 """ 
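# The struct.pack calls above store each frame sparsely: a count of stored
# pixels, then their indices, then their values, with "ih"[nobytes == 2]
# selecting an int32 or int16 payload. A self-contained round-trip sketch of
# that idea; the field layout and names here are assumptions, not the actual
# CHX multifile header (which is 1024 bytes and carries more metadata):
import struct
import numpy as np

def write_sparse_frame(fp, img, nbytes=2):
    flat = np.ravel(img)
    pos = np.flatnonzero(flat)
    vals = np.int_(flat[pos])
    fmt = "ih"[nbytes == 2]                       # int32 or int16 payload
    fp.write(struct.pack("@I", len(pos)))         # how many pixels are stored
    fp.write(struct.pack("@{}i".format(len(pos)), *pos))
    fp.write(struct.pack("@{}{}".format(len(pos), fmt), *vals))

def read_sparse_frame(fp, npix, nbytes=2):
    (dlen,) = struct.unpack("@I", fp.read(4))
    pos = struct.unpack("@{}i".format(dlen), fp.read(4 * dlen))
    fmt = "ih"[nbytes == 2]
    vals = struct.unpack("@{}{}".format(dlen, fmt), fp.read(struct.calcsize("@" + fmt) * dlen))
    img = np.zeros(npix)
    img[np.asarray(pos, dtype=int)] = vals        # rebuild the dense frame
    return img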
self.FID = open(filename, "rb") - # self.FID.seek(0,os.SEEK_SET) + # self.FID.seek(0,os.SEEK_SET) self.filename = filename # br: bytes read br = self.FID.read(1024) @@ -1397,8 +1397,8 @@ def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): for i in inputs: mean_intensity[:, i] = res[i] print("ROI mean_intensit calculation is DONE!") - del results - del res + + mean_intensity /= norm return mean_intensity, index diff --git a/pyCHX/v2/_commonspeckle/chx_correlation.py b/pyCHX/v2/_commonspeckle/chx_correlation.py index 2ef23d2..992cef4 100644 --- a/pyCHX/v2/_commonspeckle/chx_correlation.py +++ b/pyCHX/v2/_commonspeckle/chx_correlation.py @@ -1,27 +1,27 @@ -# ###################################################################### +# # # Developed at the NSLS-II, Brookhaven National Laboratory # -# # +# # # Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # # National Laboratory. All rights reserved. # -# # +# # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # -# # +# # # * Redistributions of source code must retain the above copyright # -# notice, this list of conditions and the following disclaimer. # -# # +# notice, this list of conditions and the following disclaimer. # +# # # * Redistributions in binary form must reproduce the above copyright # -# notice this list of conditions and the following disclaimer in # -# the documentation and/or other materials provided with the # -# distribution. # -# # +# notice this list of conditions and the following disclaimer in # +# the documentation and/or other materials provided with the # +# distribution. # +# # # * Neither the name of the Brookhaven Science Associates, Brookhaven # -# National Laboratory nor the names of its contributors may be used # -# to endorse or promote products derived from this software without # -# specific prior written permission. # -# # +# National Laboratory nor the names of its contributors may be used # +# to endorse or promote products derived from this software without # +# specific prior written permission. # +# # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # @@ -34,7 +34,7 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. 
# -######################################################################## +# """ This module is for functions specific to time correlation @@ -131,7 +131,7 @@ def _one_time_process( future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] @@ -672,7 +672,7 @@ def _two_time_process( img_per_level[level] += 1 # in multi-tau correlation other than first level all other levels - # have to do the half of the correlation + # have to do the half of the correlation if level == 0: i_min = 0 else: @@ -684,7 +684,7 @@ def _two_time_process( past_img = buf[level, delay_no] future_img = buf[level, buf_no] - # get the matrix of correlation function without normalizations + # get the matrix of correlation function without normalizations tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] # get the matrix of past intensity normalizations pi_binned = np.bincount(label_array, weights=past_img)[1:] @@ -892,7 +892,7 @@ class CrossCorrelator: >> cimg = cc(img1) or, mask may m >> cc = CrossCorrelator(ids) - #(where ids is same shape as img1) + # (where ids is same shape as img1) >> cc1 = cc(img1) >> cc12 = cc(img1, img2) # if img2 shifts right of img1, point of maximum correlation is shifted diff --git a/pyCHX/v2/_commonspeckle/chx_correlationc.py b/pyCHX/v2/_commonspeckle/chx_correlationc.py index fb31982..b829715 100644 --- a/pyCHX/v2/_commonspeckle/chx_correlationc.py +++ b/pyCHX/v2/_commonspeckle/chx_correlationc.py @@ -87,7 +87,7 @@ def _one_time_process( past_img = buf[level, delay_no] future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] # take out the past_ing and future_img created using bad images @@ -178,7 +178,7 @@ def _one_time_process_error( past_img = buf[level, delay_no] future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] # take out the past_ing and future_img created using bad images @@ -187,13 +187,13 @@ def _one_time_process_error( norm[level + 1][ind] += 1 else: # for w, arr in zip([past_img*future_img, past_img, future_img], - # [G, past_intensity_norm, future_intensity_norm, - # ]): - # binned = np.bincount(label_array, weights=w)[1:] - # #nonz = np.where(w)[0] - # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] - # arr[t_index] += ((binned / num_pixels - - # arr[t_index]) / normalize) + # [G, past_intensity_norm, future_intensity_norm, + # ]): + # binned = np.bincount(label_array, weights=w)[1:] + # #nonz = np.where(w)[0] + # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + # arr[t_index] += ((binned / num_pixels - + # arr[t_index]) / normalize) for w, arr in zip( [past_img * future_img, past_img, future_img], [ @@ -982,7 +982,7 @@ def lazy_two_time( norm=None, ): # def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, - # two_time_internal_state=None): + # two_time_internal_state=None): """Generator implementation of two-time correlation If you do not want multi-tau correlation, set num_levels to 1 and num_bufs to the number of images you wish to 
correlate @@ -1202,7 +1202,7 @@ def _two_time_process( img_per_level[level] += 1 # in multi-tau correlation other than first level all other levels - # have to do the half of the correlation + # have to do the half of the correlation if level == 0: i_min = 0 else: @@ -1216,7 +1216,7 @@ def _two_time_process( # print( np.sum( past_img ), np.sum( future_img )) - # get the matrix of correlation function without normalizations + # get the matrix of correlation function without normalizations tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] # get the matrix of past intensity normalizations pi_binned = np.bincount(label_array, weights=past_img)[1:] @@ -1433,7 +1433,7 @@ def cal_g2c( g_max = min(g_max1, g_max2) # print(g_max) # g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] * - # s.future_intensity[:g_max])) + # s.future_intensity[:g_max])) g2[:g_max, qi - 1] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max]) g2_err[:g_max, qi - 1] = np.sqrt( (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2 @@ -1508,9 +1508,9 @@ def __init__( if end is None: self.end = FD.end # if self.beg ==0: - # self.length = self.end - self.beg + # self.length = self.end - self.beg # else: - # self.length = self.end - self.beg + 1 + # self.length = self.end - self.beg + 1 self.length = self.end - self.beg @@ -1555,18 +1555,18 @@ def get_data(self): if self.mean_int_sets is not None: # for each frame will normalize each ROI by it's averaged value for j in range(noqs): # if i ==100: - # if j==0: - # print( self.mean_int_sets[i][j] ) - # print( qind_[ noprs[j]: noprs[j+1] ] ) + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] # if i==100: - # print( i, Mean_Int_Qind[ self.qind== 11 ]) + # print( i, Mean_Int_Qind[ self.qind== 11 ]) # print('Do norm_mean_int here') # if i ==10: - # print( norm_Mean_Int_Qind ) + # print( norm_Mean_Int_Qind ) else: norm_Mean_Int_Qind = 1.0 if self.imgsum is not None: @@ -1580,7 +1580,7 @@ def get_data(self): norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi # if i==100: - # print(norm_Mean_Int_Qind[:100]) + # print(norm_Mean_Int_Qind[:100]) data_array[n][pxlist] = v[w] / norms n += 1 @@ -1623,9 +1623,9 @@ def __init__( if end is None: self.end = FD.end # if self.beg ==0: - # self.length = self.end - self.beg + # self.length = self.end - self.beg # else: - # self.length = self.end - self.beg + 1 + # self.length = self.end - self.beg + 1 self.length = self.end - self.beg @@ -1669,18 +1669,18 @@ def get_data(self): if self.mean_int_sets is not None: # for normalization of each averaged ROI of each frame for j in range(noqs): # if i ==100: - # if j==0: - # print( self.mean_int_sets[i][j] ) - # print( qind_[ noprs[j]: noprs[j+1] ] ) + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] # if i==100: - # print( i, Mean_Int_Qind[ self.qind== 11 ]) + # print( i, Mean_Int_Qind[ self.qind== 11 ]) # print('Do norm_mean_int here') # if i ==10: - # print( norm_Mean_Int_Qind ) + # print( norm_Mean_Int_Qind ) else: norm_Mean_Int_Qind = 1.0 if self.imgsum is not None: @@ -1699,7 +1699,7 @@ def get_data(self): norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi # if 
i==100: - # print(norm_Mean_Int_Qind[:100]) + # print(norm_Mean_Int_Qind[:100]) data_array[n][pxlist] = v[w] / norms n += 1 @@ -1747,7 +1747,7 @@ def auto_two_Arrayc(data_pixel, rois, index=None): try: g12b = np.zeros([noframes, noframes, len(qlist)]) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1814,7 +1814,7 @@ def auto_two_Arrayc_ExplicitNorm(data_pixel, rois, norm=None, index=None): try: g12b = np.zeros([noframes, noframes, len(qlist)]) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1879,7 +1879,7 @@ def two_time_norm(data_pixel, rois, index=None): try: norm = np.zeros(len(qlist)) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1927,7 +1927,7 @@ def check_normalization(frame_num, q_list, imgsa, data_pixel): ) # plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n], - # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) # print( mean_int_sets_[frame_num][q-1] ) plot1D( norm_data, diff --git a/pyCHX/v2/_commonspeckle/chx_correlationp.py b/pyCHX/v2/_commonspeckle/chx_correlationp.py index 87843cd..8478143 100644 --- a/pyCHX/v2/_commonspeckle/chx_correlationp.py +++ b/pyCHX/v2/_commonspeckle/chx_correlationp.py @@ -376,8 +376,8 @@ def cal_c12p( lag_steps = res[0][1] print("G2 calculation DONE!") - del results - del res + + return c12, lag_steps[lag_steps < noframes] @@ -633,7 +633,7 @@ def lazy_one_timep( g_max = min(g_max1, g_max2) g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) # sys.stdout.write('#') - # del FD + # # sys.stdout.flush() # print (g2) # return results(g2, s.lag_steps[:g_max], s) @@ -749,9 +749,9 @@ def cal_g2p( res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] len_lag = 10**10 for i in inputs: # to get the smallest length of lag_step, - ##***************************** - ##Here could result in problem for significantly cut useful data if some Q have very short tau list - ##**************************** + # ***************************** + # Here could result in problem for significantly cut useful data if some Q have very short tau list + # **************************** if len_lag > len(res[i][1]): lag_steps = res[i][1] len_lag = len(lag_steps) @@ -803,8 +803,8 @@ def cal_g2p( if len(lag_steps_err) < len(lag_stepsi): lag_steps_err = lag_stepsi - del results - del res + + if cal_error: print("G2 with error bar calculation DONE!") return g2[:Gmax, :], lag_steps_err[:Gmax], g2_err[:Gmax, :] / np.sqrt(nopr) @@ -914,8 +914,8 @@ def cal_GPF( g2_G[:, qind == 1 + i] = res[i][2] # [:len_lag] g2_P[:, qind == 1 + i] = res[i][3] # [:len_lag] g2_F[:, qind == 1 + i] = res[i][4] # [:len_lag] - del results - del res + + return g2_G, g2_P, g2_F @@ -937,12 +937,12 @@ def get_g2_from_ROI_GPF(G, P, F, roi_mask): g2 = np.zeros([G.shape[0], noqs]) g2_err = np.zeros([G.shape[0], noqs]) for i in range(1, 1 + noqs): - ## G[0].shape is the same as roi_mask shape + # G[0].shape is the same as roi_mask shape if len(G.shape) > 2: s_Gall_qi = G[:, roi_mask == i] s_Pall_qi = P[:, roi_mask == i] s_Fall_qi = F[:, roi_mask == i] - ## G[0].shape is the same length as pixelist + # G[0].shape is the same length as pixelist else: s_Gall_qi = G[:, qind == 
i] s_Pall_qi = P[:, qind == i] @@ -1025,7 +1025,7 @@ def auto_two_Arrayp(data_pixel, rois, index=None): # pool = Pool(processes= len(inputs) ) # results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i], - # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] + # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] # res = [r.get() for r in results] pool = Pool(processes=len(inputs)) diff --git a/pyCHX/v2/_commonspeckle/chx_correlationp2.py b/pyCHX/v2/_commonspeckle/chx_correlationp2.py index ca8c0f0..8dae060 100644 --- a/pyCHX/v2/_commonspeckle/chx_correlationp2.py +++ b/pyCHX/v2/_commonspeckle/chx_correlationp2.py @@ -360,8 +360,8 @@ def cal_c12p( lag_steps = res[0][1] print("G2 calculation DONE!") - del results - del res + + return c12, lag_steps[lag_steps < noframes] @@ -609,7 +609,7 @@ def lazy_one_timep( g_max = min(g_max1, g_max2) g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) # sys.stdout.write('#') - # del FD + # # sys.stdout.flush() # print (g2) # return results(g2, s.lag_steps[:g_max], s) @@ -714,9 +714,9 @@ def cal_g2p( res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] len_lag = 10**10 for i in inputs: # to get the smallest length of lag_step, - ##***************************** - ##Here could result in problem for significantly cut useful data if some Q have very short tau list - ##**************************** + # ***************************** + # Here could result in problem for significantly cut useful data if some Q have very short tau list + # **************************** if len_lag > len(res[i][1]): lag_steps = res[i][1] len_lag = len(lag_steps) @@ -775,8 +775,8 @@ def cal_g2p( g2_P[:, nopr_[i] : nopr_[i + 1]] = s_Pall_qi g2_F[:, nopr_[i] : nopr_[i + 1]] = s_Fall_qi - del results - del res + + if cal_error: print("G2 with error bar calculation DONE!") return ( @@ -842,7 +842,7 @@ def auto_two_Arrayp(data_pixel, rois, index=None): # pool = Pool(processes= len(inputs) ) # results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i], - # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] + # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] # res = [r.get() for r in results] pool = Pool(processes=len(inputs)) diff --git a/pyCHX/v2/_commonspeckle/chx_generic_functions.py b/pyCHX/v2/_commonspeckle/chx_generic_functions.py index fb6db14..5b3b487 100644 --- a/pyCHX/v2/_commonspeckle/chx_generic_functions.py +++ b/pyCHX/v2/_commonspeckle/chx_generic_functions.py @@ -858,7 +858,7 @@ def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 tifs = list(db[uid].data("OAV_image"))[0] try: pixel_scalebar = np.ceil(scalebar_size / md["OAV resolution um_pixel"]) - except: + except Exception: pixel_scalebar = None print("No OAVS resolution is available.") @@ -882,7 +882,7 @@ def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 img = oavs[m] try: ind = np.flipud(img * scale)[:, :, 2] < threshold - except: + except Exception: ind = np.flipud(img * scale) < threshold rgb_cont_img = np.copy(np.flipud(img)) # rgb_cont_img[ind,0]=1000 @@ -1212,7 +1212,7 @@ def ps(y, shift=0.5, replot=True, logplot="off", x=None): PEAK_y = np.max(y) COM = np.sum(x * y) / np.sum(y) - ### from Maksim: assume this is a peak profile: + # from Maksim: assume this is a peak profile: def is_positive(num): return True if num > 0 else False @@ -1232,18 +1232,18 @@ def is_positive(num): ps.cen = CEN yf = ym # return { - # 'fwhm': abs(list_of_roots[-1] - 
list_of_roots[0]), - # 'x_range': list_of_roots, + # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), + # 'x_range': list_of_roots, # } else: # ok, maybe it's a step function.. # print('no peak...trying step function...') ym = ym + shift - def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang + def err_func(x, x0, k=2, A=1, base=0): # erf fit from Yugang return base - A * erf(k * (x - x0)) mod = Model(err_func) - ### estimate starting values: + # estimate starting values: x0 = np.mean(x) # k=0.1*(np.max(x)-np.min(x)) pars = mod.make_params(x0=x0, k=2, A=1.0, base=0.0) @@ -1261,7 +1261,7 @@ def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang ps.fwhm = FWHM if replot: - ### re-plot results: + # re-plot results: if logplot == "on": fig, ax = plt.subplots() # plt.figure() ax.semilogy([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") @@ -1289,7 +1289,7 @@ def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) # plt.show() - ### assign values of interest as function attributes: + # assign values of interest as function attributes: ps.peak = PEAK ps.com = COM return ps.cen @@ -1480,7 +1480,7 @@ def average_array_withNan(array, axis=0, mask=None): array_ = np.ma.masked_array(array, mask=mask) try: sums = np.array(np.ma.sum(array_[:, :], axis=axis)) - except: + except Exception: sums = np.array(np.ma.sum(array_[:], axis=axis)) cts = np.sum(~mask, axis=axis) @@ -1863,7 +1863,7 @@ def linear_fit(x, y, xrange=None): def find_index(x, x0, tolerance=None): """YG Octo 16,2017 copied from SAXS find index of x0 in x - #find the position of P in a list (plist) with tolerance + # find the position of P in a list (plist) with tolerance """ N = len(x) @@ -1880,13 +1880,13 @@ def find_index(x, x0, tolerance=None): def find_index_old(x, x0, tolerance=None): """YG Octo 16,2017 copied from SAXS find index of x0 in x - #find the position of P in a list (plist) with tolerance + # find the position of P in a list (plist) with tolerance """ N = len(x) i = 0 position = None - if tolerance == None: + if tolerance is None: tolerance = (x[1] - x[0]) / 2.0 if x0 > max(x): position = len(x) - 1 @@ -1991,7 +1991,7 @@ def sgolay2d(z, window_size, order, derivative=None): Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band) # solve system and convolve - if derivative == None: + if derivative is None: m = np.linalg.pinv(A)[0].reshape((window_size, -1)) return scipy.signal.fftconvolve(Z, m, mode="valid") elif derivative == "col": @@ -2039,7 +2039,7 @@ def extract_data_from_file( Or giving start_row: int good_cols: list of integer, good index of cols lables: the label of the good_cols - #save: False, if True will save the data into a csv file with filename appending csv ?? + # save: False, if True will save the data into a csv file with filename appending csv ?? 
Return: a pds.dataframe Example: @@ -2077,7 +2077,7 @@ def extract_data_from_file( else: temp = np.array([els[j] for j in good_cols], dtype=float) data = np.vstack((data, temp)) - except: + except Exception: pass if labels is None: labels = np.arange(data.shape[1]) @@ -2107,9 +2107,9 @@ def get_print_uids(start_time, stop_time, return_all_info=False): date = time.ctime(hdrs[-i - 1]["start"]["time"]) try: m = hdrs[-i - 1]["start"]["Measurement"] - except: + except Exception: m = "" - info = "%3d: uid = '%s' ##%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) + info = "%3d: uid = '%s' #%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) print(info) if return_all_info: all_info[n] = info @@ -2251,7 +2251,7 @@ def validate_uid(uid): imgs = load_data(uid, md["detector"], reverse=True) print(imgs) return 1 - except: + except Exception: print("Can't load this uid=%s!" % uid) return 0 @@ -2399,7 +2399,7 @@ def filter_roi_mask(filter_dict, roi_mask, avg_img, filter_type="ylim"): return rm -## +# # Dev at March 31 for create Eiger chip mask def create_chip_edges_mask(det="1M"): """Create a chip edge mask for Eiger detector""" @@ -2451,7 +2451,7 @@ def create_folder(base_folder, sub_folder): """ data_dir0 = os.path.join(base_folder, sub_folder) - ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + # Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) print("Results from this analysis will be stashed in the directory %s" % data_dir0) return data_dir0 @@ -2472,15 +2472,15 @@ def create_user_folder(CYCLE, username=None, default_dir="/XF11ID/analysis/"): data_dir0 = os.path.join(default_dir, CYCLE, username, "Results/") else: data_dir0 = os.path.join(default_dir, CYCLE + "/") - ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + # Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) print("Results from this analysis will be stashed in the directory %s" % data_dir0) return data_dir0 -################################## -#########For dose analysis ####### -################################## +# +# For dose analysis # +# def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): """ Calculate the frame number to be correlated by giving a X-ray exposure dose @@ -2583,14 +2583,14 @@ def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm try: lambda_ = md["wavelength"] - except: + except Exception: lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms try: Ldet = md["det_distance"] if Ldet <= 1000: Ldet *= 1000 md["det_distance"] = Ldet - except: + except Exception: Ldet = md["detector_distance"] if Ldet <= 1000: Ldet *= 1000 @@ -2598,14 +2598,14 @@ def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * try: # try exp time from detector exposuretime = md["count_time"] # exposure time in sec - except: + except Exception: exposuretime = md["cam_acquire_time"] # exposure time in sec try: # try acq time from detector acquisition_period = md["frame_time"] - except: + except Exception: try: acquisition_period = md["acquire period"] - except: + except Exception: uid = md["uid"] acquisition_period = float(db[uid]["start"]["acquire period"]) timeperframe = acquisition_period @@ -2806,7 +2806,7 @@ def find_uids(start_time, stop_time): hdrs = db(start_time=start_time, 
stop_time=stop_time) try: print("Totally %s uids are found." % (len(list(hdrs)))) - except: + except Exception: pass sids = [] uids = [] @@ -3078,7 +3078,7 @@ def print_dict(dicts, keys=None): for k in keys: try: print("%s--> %s" % (k, dicts[k])) - except: + except Exception: pass @@ -3120,7 +3120,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): md["suid"] = uid # short uid try: md["filename"] = get_sid_filenames(header)[2][0] - except: + except Exception: md["filename"] = "N.A." devices = sorted(list(header.devices())) @@ -3140,7 +3140,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): # detector_names = sorted( header.start['detectors'] ) detector_names = sorted(get_detectors(db[uid])) # if len(detector_names) > 1: - # raise ValueError("More than one det. This would have unintented consequences.") + # raise ValueError("More than one det. This would have unintented consequences.") detector_name = detector_names[0] # md['detector'] = detector_name md["detector"] = get_detector(header) @@ -3151,12 +3151,12 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): md[newkey] = val # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): - # md[ k[len(dec)+1:] ]= v + # md[ k[len(dec)+1:] ]= v try: md.update(header.start["plan_args"].items()) md.pop("plan_args") - except: + except Exception: pass md.update(header.start.items()) @@ -3165,7 +3165,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): md["stop_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.stop["time"])) try: # added: try to handle runs that don't contain image data md["img_shape"] = header["descriptors"][0]["data_keys"][md["detector"]]["shape"][:2][::-1] - except: + except Exception: if verbose: print("couldn't find image shape...skip!") else: @@ -3174,7 +3174,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): # for k, v in sorted(md.items()): # ... 
- # print(f'{k}: {v}') + # print(f'{k}: {v}') return md @@ -3223,7 +3223,7 @@ def get_max_countc(FD, labeled_array): (p, v) = FD.rdrawframe(i) w = np.where(timg[p])[0] max_inten = max(max_inten, np.max(v[w])) - except: + except Exception: pass return max_inten @@ -3375,7 +3375,7 @@ def create_cross_mask( imy, imx = image.shape cx, cy = center bst_mask = np.zeros_like(image, dtype=bool) - ### + # # for right part wy = wy_right x = np.array([cx, imx, imx, cx]) @@ -3383,7 +3383,7 @@ def create_cross_mask( rr, cc = polygon(y, x) bst_mask[rr, cc] = 1 - ### + # # for left part wy = wy_left x = np.array([0, cx, cx, 0]) @@ -3391,7 +3391,7 @@ def create_cross_mask( rr, cc = polygon(y, x) bst_mask[rr, cc] = 1 - ### + # # for up part wx = wx_up x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) @@ -3399,7 +3399,7 @@ def create_cross_mask( rr, cc = polygon(y, x) bst_mask[rr, cc] = 1 - ### + # # for low part wx = wx_down x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) @@ -3462,7 +3462,7 @@ def export_scan_scalar( return datap -##### +# # load data by databroker @@ -3646,7 +3646,7 @@ def load_data2(uid, detector="eiger4m_single_image"): try: (ev,) = hdr.events(fields=[detector]) flag = 0 - except: + except Exception: flag += 1 print("Trying again ...!") @@ -3791,8 +3791,8 @@ def RemoveHot(img, threshold=1e7, plot_=True): return mask -############ -###plot data +# +# plot data def show_img( @@ -3995,34 +3995,34 @@ def plot1D( legend = " " try: logx = kwargs["logx"] - except: + except Exception: logx = False try: logy = kwargs["logy"] - except: + except Exception: logy = False try: logxy = kwargs["logxy"] - except: + except Exception: logxy = False - if logx == True and logy == True: + if logx and logy: logxy = True try: marker = kwargs["marker"] - except: + except Exception: try: marker = kwargs["m"] - except: + except Exception: marker = next(markers_) try: color = kwargs["color"] - except: + except Exception: try: color = kwargs["c"] - except: + except Exception: color = next(colors_) if x is None: @@ -4076,7 +4076,7 @@ def plot1D( title = "plot" ax.set_title(title) # ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') - if (legend != "") and (legend != None): + if (legend != "") and (legend is not None): ax.legend(loc="best", fontsize=legend_size) if "save" in kwargs.keys(): if kwargs["save"]: @@ -4089,7 +4089,7 @@ def plot1D( return fig -### +# def check_shutter_open(data_series, min_inten=0, time_edge=[0, 10], plot_=False, *argv, **kwargs): @@ -5165,7 +5165,7 @@ def save_g2_general(g2, taus, qr=None, qz=None, uid="uid", path=None, return_res return df -########### +# # *for g2 fit and plot @@ -5203,7 +5203,7 @@ def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1): def flow_para_function_explicitq(x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0): """Nov 9, 2017 Basically, make q vector to (qr, angle), - ###relaxation_rate is actually a diffusion rate + # relaxation_rate is actually a diffusion rate flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) Diffusion part: np.exp( -2*D q^2 *tau ) q_ang: would be np.radians( ang - 90 ) @@ -5524,7 +5524,7 @@ def get_g2_fit_general( try: if isinstance(_guess_val[k], (np.ndarray, list)): pars[k].value = _guess_val[k][i] - except: + except Exception: pass if True: @@ -5563,7 +5563,7 @@ def get_g2_fit_general( pars["%s" % v].vary = False # if i==20: - # print(pars) + # print(pars) # print( pars ) result1 = mod.fit(y, pars, x=lags) # print(qval_dict[i][0], qval_dict[i][1], y) @@ -5665,9 +5665,9 @@ def 
get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): ) -############################################ -##a good func to plot g2 for all types of geogmetries -############################################ +# +# a good func to plot g2 for all types of geogmetries +# def plot_g2_general( @@ -5759,7 +5759,7 @@ def plot_g2_general( for k in list(g2_dict.keys()): g2_dict_[k] = g2_dict[k][:, [i for i in qth_interest]] # for k in list(taus_dict.keys()): - # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] + # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] taus_dict_ = taus_dict qval_dict_ = {k: qval_dict[k] for k in qth_interest} if fit_res is not None: @@ -5797,8 +5797,8 @@ def plot_g2_general( ind_long_i = ind_long[s_ind] num_long_i = len(ind_long_i) # if show_average_ang_saxs: - # if geometry=='ang_saxs': - # num_long_i += 1 + # if geometry=='ang_saxs': + # num_long_i += 1 if RUN_GUI: fig = Figure(figsize=(10, 12)) else: @@ -5869,8 +5869,8 @@ def plot_g2_general( for i, l_ind in enumerate(ind_long_i): if num_long_i <= max_plotnum_fig: # if s_ind ==2: - # print('Here') - # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) + # print('Here') + # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) ax = fig.add_subplot(sx, sy, i + 1) if sx == 1: if sy == 1: @@ -5936,7 +5936,7 @@ def plot_g2_general( dumy = g2_dict_[k].shape # print( 'here is the shape' ) islist = False - except: + except Exception: islist_n = len(g2_dict_[k]) islist = True # print( 'here is the list' ) @@ -6121,7 +6121,7 @@ def plot_g2_general( vmin, vmax = kwargs["vlim"] try: ax.set_ylim([ymin * vmin, ymax * vmax]) - except: + except Exception: pass else: pass @@ -6142,7 +6142,7 @@ def plot_g2_general( # print(fig) try: plt.savefig(fp + ".png", dpi=fig.dpi) - except: + except Exception: print("Can not save figure here.") else: diff --git a/pyCHX/v2/_commonspeckle/chx_handlers.py b/pyCHX/v2/_commonspeckle/chx_handlers.py index 998ce9c..426c679 100644 --- a/pyCHX/v2/_commonspeckle/chx_handlers.py +++ b/pyCHX/v2/_commonspeckle/chx_handlers.py @@ -1,5 +1,5 @@ -###Copied from chxtools/chxtools/handlers.py -###https://github.com/NSLS-II-CHX/chxtools/blob/master/chxtools/handlers.py +# Copied from chxtools/chxtools/handlers.py +# https://github.com/NSLS-II-CHX/chxtools/blob/master/chxtools/handlers.py # handler registration and database instantiation should be done diff --git a/pyCHX/v2/_commonspeckle/chx_libs.py b/pyCHX/v2/_commonspeckle/chx_libs.py index ba0fef5..2c845a6 100644 --- a/pyCHX/v2/_commonspeckle/chx_libs.py +++ b/pyCHX/v2/_commonspeckle/chx_libs.py @@ -31,13 +31,13 @@ # from chxtools.handlers import EigerHandler # from eiger_io.fs_handler import EigerHandler #common # from databroker.assets.path_only_handlers import RawHandler #common -## Import all the required packages for Data Analysis +# Import all the required packages for Data Analysis # from databroker import Broker #common # db = Broker.named('chx') #common # * scikit-beam - data analysis tools for X-ray science -# - https://github.com/scikit-beam/scikit-beam +# - https://github.com/scikit-beam/scikit-beam # * xray-vision - plotting helper functions for X-ray science -# - https://github.com/Nikea/xray-vision +# - https://github.com/Nikea/xray-vision import xray_vision import xray_vision.mpl_plotting as mpl_plot from IPython.core.magics.display import Javascript @@ -368,7 +368,7 @@ # colors_ = itertools.cycle(sorted_colors_ ) markers_ = itertools.cycle(markers) # Custom colormaps 
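(plot1D above falls back to next(markers_)/next(colors_) when no style is passed; a self-contained sketch of that cycling pattern, with short illustrative lists rather than the module's own:)

import itertools

# Illustrative style cycles in the spirit of chx_libs' markers_/colors_;
# the real module cycles its own, longer marker and color lists.
markers_ = itertools.cycle(["o", "s", "^", "D", "v"])
colors_ = itertools.cycle(["b", "g", "r", "c", "m"])

for q_index in range(7):
    # each curve takes the next style, wrapping automatically when exhausted
    print(q_index, next(markers_), next(colors_))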
-################################################################################ +# # ROYGBVR but with Cyan-Blue instead of Blue color_list_cyclic_spectrum = [ [1.0, 0.0, 0.0], diff --git a/pyCHX/v2/_commonspeckle/chx_speckle.py b/pyCHX/v2/_commonspeckle/chx_speckle.py index 75ab068..8075a2e 100644 --- a/pyCHX/v2/_commonspeckle/chx_speckle.py +++ b/pyCHX/v2/_commonspeckle/chx_speckle.py @@ -151,7 +151,7 @@ def xsvs( try: noframes = len(images) - except: + except Exception: noframes = images.length # Num= { key: [0]* len( dict_dly[key] ) for key in list(dict_dly.keys()) } @@ -413,9 +413,9 @@ def get_bin_edges(num_times, num_rois, mean_roi, max_cts): return bin_edges, bin_centers, norm_bin_edges, norm_bin_centers -################# -##for fit -################### +# +# for fit +# from scipy import stats from scipy.special import gamma, gammaln @@ -505,7 +505,7 @@ def nbinom_dist(bin_values, K, M): return nbinom -#########poisson +# poisson def poisson(x, K): """Poisson distribution function. K is average photon counts @@ -741,7 +741,7 @@ def fit_xsvs1( axes.set_xlabel("K/") axes.set_ylabel("P(K)") - # Using the best K and M values interpolate and get more values for fitting curve + # Using the best K and M values interpolate and get more values for fitting curve fitx_ = np.linspace(0, max(Knorm_bin_edges[j, i][:-1]), 1000) fitx = np.linspace(0, max(bin_edges[j, i][:-1]), 1000) if func == "bn": @@ -883,7 +883,7 @@ def plot_xsvs_g2(g2, taus, res_pargs=None, *argv, **kwargs): # plt.show() -###########################3 +# 3 # @@ -999,7 +999,7 @@ def get_xsvs_fit( full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -1014,7 +1014,7 @@ def get_xsvs_fit( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) diff --git a/pyCHX/v2/_commonspeckle/chx_specklecp.py b/pyCHX/v2/_commonspeckle/chx_specklecp.py index 771e51f..d0692be 100644 --- a/pyCHX/v2/_commonspeckle/chx_specklecp.py +++ b/pyCHX/v2/_commonspeckle/chx_specklecp.py @@ -248,8 +248,8 @@ def xsvsp_single( ) print("Histogram calculation DONE!") - del results - del res + + return bin_edges, prob_k, prob_k_std_dev, his_sum @@ -566,7 +566,7 @@ def xsvsc_single( processing = 0 # print( level ) # prob_k_std_dev = np.power((prob_k_pow - - # np.power(prob_k, 2)), .5) + # np.power(prob_k, 2)), .5) for i in range(num_times): for j in range(num_roi): @@ -577,8 +577,8 @@ def xsvsc_single( prob_k[i, j] = prob_k[i, j] / his_sum[i, j] # for i in range(num_times): - # if isinstance(prob_k[i,0], float ) or isinstance(prob_k[i,0], int ): - # pass + # if isinstance(prob_k[i,0], float ) or isinstance(prob_k[i,0], int ): + # pass return bin_edges, prob_k, prob_k_std_dev, his_sum @@ -630,9 +630,9 @@ def _process( track_bad_level[level] += 1 # print (img_per_level,track_bad_level) u_labels = list(np.unique(labels)) - ############## - ##To Do list here, change histogram to bincount - ##Change error bar calculation + # + # To Do list here, change histogram to bincount + # Change error bar calculation if not (np.isnan(data).any()): for j, label in enumerate(u_labels): roi_data = data[labels == label] @@ -643,12 +643,12 @@ def _process( spe_hist = np.nan_to_num(spe_hist) # print( spe_hist.shape ) # prob_k[level, j] += (spe_hist - - # 
prob_k[level, j])/( img_per_level[level] - track_bad_level[level] ) + # prob_k[level, j])/( img_per_level[level] - track_bad_level[level] ) # print( prob_k[level, j] ) prob_k[level, j] += spe_hist # print( spe_hist.shape, prob_k[level, j] ) # prob_k_pow[level, j] += (np.power(spe_hist, 2) - - # prob_k_pow[level, j])/(img_per_level[level] - track_bad_level[level]) + # prob_k_pow[level, j])/(img_per_level[level] - track_bad_level[level]) def normalize_bin_edges(num_times, num_rois, mean_roi, max_cts): @@ -892,15 +892,15 @@ def get_bin_edges(num_times, num_rois, mean_roi, max_cts): return bin_edges, bin_centers, norm_bin_edges, norm_bin_centers -################# -##for fit -################### +# +# for fit +# from scipy import stats from scipy.special import gamma, gammaln -###########################3 -##Dev at Nov 18, 2016 +# 3 +# Dev at Nov 18, 2016 # @@ -950,8 +950,8 @@ def nbinomres(p, hist, x, hist_err=None, N=1): return err -########### -##Dev at Octo 12, 2017 +# +# Dev at Octo 12, 2017 def nbinom(p, x, mu): @@ -1103,7 +1103,7 @@ def get_xsvs_fit( full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(kmean_guess) # resultL[0][0] ) + KL_val[i].append(kmean_guess) # resultL[0][0] ) else: # vary M and K fit_func = nbinomlog @@ -1118,7 +1118,7 @@ def get_xsvs_fit( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) @@ -1151,9 +1151,9 @@ def plot_xsvs_fit( """ # if qth is None: - # fig = plt.figure(figsize=(10,12)) + # fig = plt.figure(figsize=(10,12)) # else: - # fig = plt.figure(figsize=(8,8)) + # fig = plt.figure(figsize=(8,8)) max_cts = spe_cts_all[0][0].shape[0] - 1 num_times, num_rings = spe_cts_all.shape @@ -1543,7 +1543,7 @@ def get_xsvs_fit_old( full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -1558,13 +1558,13 @@ def get_xsvs_fit_old( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) # if max_bins==2: - # ML_val = np.array( [ML_val[k][0] for k in sorted(list(ML_val.keys()))] ) - # KL_val = np.array( [KL_val[k][0] for k in sorted(list(KL_val.keys()))] ) + # ML_val = np.array( [ML_val[k][0] for k in sorted(list(ML_val.keys()))] ) + # KL_val = np.array( [KL_val[k][0] for k in sorted(list(KL_val.keys()))] ) return ML_val, KL_val, np.array(K_) @@ -1653,7 +1653,7 @@ def nbinom_dist(bin_values, K, M): return nbinom -#########poisson +# poisson def poisson(x, K): """Poisson distribution function. 
K is average photon counts @@ -1880,7 +1880,7 @@ def fit_xsvs1( axes.set_xlabel("K/") axes.set_ylabel("P(K)") - # Using the best K and M values interpolate and get more values for fitting curve + # Using the best K and M values interpolate and get more values for fitting curve fitx_ = np.linspace(0, max(Knorm_bin_edges[j, i][:-1]), 1000) fitx = np.linspace(0, max(bin_edges[j, i][:-1]), 1000) if func == "bn": @@ -2100,7 +2100,7 @@ def get_xsvs_fit_old1( ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -2119,7 +2119,7 @@ def get_xsvs_fit_old1( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) diff --git a/pyCHX/v2/_commonspeckle/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/v2/_commonspeckle/chx_xpcs_xsvs_jupyter_V1.py index e9b8876..d462ff4 100644 --- a/pyCHX/v2/_commonspeckle/chx_xpcs_xsvs_jupyter_V1.py +++ b/pyCHX/v2/_commonspeckle/chx_xpcs_xsvs_jupyter_V1.py @@ -377,9 +377,9 @@ def plot_entries_from_uids( return fig, ax -#################################################################################################### -##For real time analysis## -################################################################################################# +# +# For real time analysis# +# def get_iq_from_uids(uids, mask, setup_pargs): @@ -502,7 +502,7 @@ def wait_data_acquistion_finish(uid, wait_time=2, max_try_num=3): FINISH = True print("The data acquistion finished.") print("Starting to do something here...") - except: + except Exception: wait_func(wait_time=wait_time) w += 1 print("Try number: %s" % w) @@ -598,7 +598,7 @@ def do_compress_on_line(start_time, stop_time, mask_dict=None, mask=None, wait_t text="Data are on-line sparsified!", attachments=None, ) - except: + except Exception: print("There are something wrong with this data: %s..." % uid) print("*" * 50) return time.time() - t0 @@ -644,14 +644,14 @@ def realtime_xpcs_analysis( if finish: try: md = get_meta_data(uid) - ##corect some metadata + # corect some metadata if md_update is not None: md.update(md_update) # if 'username' in list(md.keys()): # try: - # md_cor['username'] = md_update['username'] - # except: - # md_cor = None + # md_cor['username'] = md_update['username'] + # except Exception: + # md_cor = None # uid = uid[:8] # print(md_cor) if not emulation: @@ -664,7 +664,7 @@ def realtime_xpcs_analysis( clear_plot=clear_plot, ) # update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) - except: + except Exception: print("There are something wrong with this data: %s..." % uid) else: print("\nThis is not a XPCS series. 
We will simiply ignore it.") @@ -676,9 +676,9 @@ def realtime_xpcs_analysis( return time.time() - t0 -#################################################################################################### -##compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress## -################################################################################################# +# +# compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress# +# def compress_multi_uids( uids, mask, @@ -750,9 +750,9 @@ def compress_multi_uids( print("Done!") -#################################################################################################### -##get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid ## -################################################################################################# +# +# get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid # +# def get_two_time_mulit_uids( @@ -820,7 +820,7 @@ def get_two_time_mulit_uids( data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() g12b = auto_two_Arrayc(data_pixel, roi_mask, index=None) np.save(filename, g12b) - del g12b + print("The two time correlation function for uid={} is saved as {}.".format(uid, filename)) @@ -971,17 +971,17 @@ def get_series_one_time_mulit_uids( try: g2_path = path + uid + "/" g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) - except: + except Exception: g2_path = path + md["uid"] + "/" g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) try: exp_time = float(md["cam_acquire_time"]) # *1000 #from second to ms - except: + except Exception: exp_time = float(md["exposure time"]) # * 1000 #from second to ms if trans is None: try: transi = md["transmission"] - except: + except Exception: transi = [1] else: transi = trans[i] @@ -1139,7 +1139,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_fit_form = False, run_waterfall = True,#False, run_t_ROI_Inten = True, - #run_fit_g2 = True, + # run_fit_g2 = True, fit_g2_func = 'stretched', run_one_time = True,#False, run_two_time = True,#False, @@ -1159,8 +1159,8 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= num_rings = 12, gap_ring_number = 6, number_rings= 1, - #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 - #width = 0.0002 + # qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + # width = 0.0002 qth_interest = 1, #the intested single qth use_sqnorm = False, use_imgsum_norm = True, @@ -1187,21 +1187,21 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_xsvs = run_pargs["run_xsvs"] try: run_dose = run_pargs["run_dose"] - except: + except Exception: run_dose = False - ############################################################### + # if scat_geometry == "gi_saxs": # to be done for other types run_xsvs = False - ############################################################### + # - ############################################################### + # if scat_geometry == "ang_saxs": run_xsvs = False run_waterfall = False run_two_time = False run_four_time = False run_t_ROI_Inten = False - ############################################################### + # if "bin_frame" in list(run_pargs.keys()): bin_frame = run_pargs["bin_frame"] bin_frame_number = run_pargs["bin_frame_number"] @@ -1219,12 +1219,12 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= 
use_imgsum_norm = run_pargs["use_imgsum_norm"] try: use_sqnorm = run_pargs["use_sqnorm"] - except: + except Exception: use_sqnorm = False try: inc_x0 = run_pargs["inc_x0"] inc_y0 = run_pargs["inc_y0"] - except: + except Exception: inc_x0 = None inc_y0 = None @@ -1267,7 +1267,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= try: username = run_pargs["username"] - except: + except Exception: username = getpass.getuser() data_dir0 = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") @@ -1337,7 +1337,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= "beam_center_y", ], ) - ## Overwrite Some Metadata if Wrong Input + # Overwrite Some Metadata if Wrong Input dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata( md, Nimg, inc_x0=inc_x0, inc_y0=inc_y0, pixelsize=7.5 * 10 * (-5) ) @@ -1437,7 +1437,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= mask = mask * Chip_Mask # %system free && sync && echo 3 > /proc/sys/vm/drop_caches && free - ## Get bad frame list by a polynominal fit + # Get bad frame list by a polynominal fit bad_frame_list = get_bad_frame_list( imgsum, fit=True, @@ -1450,7 +1450,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= ) print("The bad frame list length is: %s" % len(bad_frame_list)) - ### Creat new mask by masking the bad pixels and get new avg_img + # Creat new mask by masking the bad pixels and get new avg_img if False: mask = mask_exclude_badpixel(bp, mask, md["uid"]) avg_img = get_avg_imgc(FD, sampling=1, bad_frame_list=bad_frame_list) @@ -1485,16 +1485,16 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= path=data_dir, ) - ############for SAXS and ANG_SAXS (Flow_SAXS) + # for SAXS and ANG_SAXS (Flow_SAXS) if scat_geometry == "saxs" or scat_geometry == "ang_saxs": # show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, - # image_name= uidstr + '_img_avg', save=True) + # image_name= uidstr + '_img_avg', save=True) # np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) # try: - # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) - # except: - # hmask=1 + # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) + # except Exception: + # hmask=1 hmask = 1 qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img * Chip_Mask, @@ -1513,7 +1513,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= ) # pd = trans_data_to_pd( np.where( hmask !=1), - # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') + # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') # pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) @@ -1634,9 +1634,9 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= qrt_pds = get_t_qrc(FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid=uidstr) plot_qrt_pds(qrt_pds, time_edge, qz_index=0, uid=uidstr, path=data_dir) - ############################## - ##the below works for all the geometries - ######################################## + # + # the below works for all the geometries + # if scat_geometry != "ang_saxs": roi_inten = check_ROI_intensity( avg_img, @@ -1764,7 +1764,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= # if run_one_time: # plot_g2_general( 
g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, - # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') + # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') plot_g2_general( g2_dict={1: g2, 2: g2_fit}, @@ -2119,7 +2119,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= N = len(imgs) try: tr = md["transmission"] - except: + except Exception: tr = 1 if "dose_frame" in list(run_pargs.keys()): dose_frame = run_pargs["dose_frame"] @@ -2167,7 +2167,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= times_xsvs = exposuretime + (2 ** (np.arange(len(time_steps))) - 1) * timeperframe print("The max counts are: %s" % max_cts) - ### Do historam + # Do historam if roi_avg is None: times_roi, mean_int_sets = cal_each_ring_mean_intensityc( FD, @@ -2258,7 +2258,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= path=data_dir, ) - ### Get contrast + # Get contrast contrast_factorL = get_contrast(ML_val) spec_km_pds = save_KM( spec_kmean, @@ -2514,7 +2514,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_dose=run_dose, report_type=scat_geometry, ) - ## Attach the PDF report to Olog + # Attach the PDF report to Olog if att_pdf_report: os.environ["HTTPS_PROXY"] = "https://proxy:8888" os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" @@ -2522,7 +2522,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= atch = [Attachment(open(pname, "rb"))] try: update_olog_uid(uid=md["uid"], text="Add XPCS Analysis PDF Report", attachments=atch) - except: + except Exception: print( "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." 
% pname @@ -2531,7 +2531,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= if show_plot: plt.show() # else: - # plt.close('all') + # plt.close('all') if clear_plot: plt.close("all") if return_res: diff --git a/pyCHX/v2/_commonspeckle/movie_maker.py b/pyCHX/v2/_commonspeckle/movie_maker.py index bade9de..ac2a91a 100644 --- a/pyCHX/v2/_commonspeckle/movie_maker.py +++ b/pyCHX/v2/_commonspeckle/movie_maker.py @@ -1,6 +1,6 @@ -################################ -######Movie_maker############### -################################ +# +# Movie_maker# +# def read_imgs(inDir): @@ -33,7 +33,7 @@ def select_regoin( try: img_[ys:ye, xs:xe] = True - except: + except Exception: img_[ys:ye, xs:xe, :] = True pixellist_ = np.where(img_.ravel())[0] # pixellist_ = img_.ravel() @@ -49,7 +49,7 @@ def select_regoin( else: try: imgx = img[ys:ye, xs:xe] - except: + except Exception: imgx = img[ys:ye, xs:xe, :] return imgx @@ -89,7 +89,7 @@ def save_png_series( save png files """ - if uid == None: + if uid is None: uid = "uid" num_frame = 0 for img in imgs: @@ -191,8 +191,8 @@ def movie_maker( Returns ------- - #ani : - # movie + # ani : + # movie """ diff --git a/pyCHX/v2/_commonspeckle/xpcs_timepixel.py b/pyCHX/v2/_commonspeckle/xpcs_timepixel.py index 6c594a9..7d8dd2a 100644 --- a/pyCHX/v2/_commonspeckle/xpcs_timepixel.py +++ b/pyCHX/v2/_commonspeckle/xpcs_timepixel.py @@ -69,7 +69,7 @@ def get_timepixel_data(data_dir, filename, time_unit=1): """ data = pds.read_csv(data_dir + filename) - #'#Col', ' #Row', ' #ToA', + # '#Col', ' #Row', ' #ToA', # return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps if time_unit != 1: try: @@ -78,7 +78,7 @@ def get_timepixel_data(data_dir, filename, time_unit=1): np.array(data["#Row"]), np.array(data["#ToA"]) * time_unit, ) - except: + except Exception: x, y, t = ( np.array(data["#Col"]), np.array(data[" #Row"]), @@ -91,7 +91,7 @@ def get_timepixel_data(data_dir, filename, time_unit=1): np.array(data["#Row"]), np.array(data["#ToA"]), ) - except: + except Exception: x, y, t = ( np.array(data["#Col"]), np.array(data[" #Row"]), @@ -175,7 +175,7 @@ def get_FD_end_num(FD, maxend=1e10): for i in range(0, int(maxend)): try: FD.seekimg(i) - except: + except Exception: N = i break FD.seekimg(0) @@ -313,8 +313,8 @@ def init_compress_timepix_data(pos, t, binstep, filename, mask=None, md=None, no # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -408,8 +408,8 @@ def init_compress_timepix_data_light_duty( # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -496,8 +496,8 @@ def compress_timepix_data_old(data_pixel, filename, rois=None, md=None, nobytes= # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 
'cols_begin', 'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -548,8 +548,8 @@ def __init__( """ indexable: a images sequences pixelist: 1-D array, interest pixel list - #flat_correction, normalized by flatfield - #norm, normalized by total intensity, like a incident beam intensity + # flat_correction, normalized by flatfield + # norm, normalized by total intensity, like a incident beam intensity """ self.hitime = hitime self.tbins = tbins @@ -728,7 +728,7 @@ def get_timepixel_g2(oned_count): return np.correlate(oned_count, oned_count, mode="full")[-n:] / norm -######################################### +# T = True F = False @@ -816,7 +816,7 @@ def make_qlist(self): qlist[1::2] = round(qradi + (1 + qwidth) / 2) # render odd value qlist[::2] = int_(qradi - qwidth / 2) # render even value qlist[1::2] = int_(qradi + (1 + qwidth) / 2) # render odd value - if qlist_ != None: + if qlist_ is not None: qlist = qlist_ return qlist, qradi @@ -864,9 +864,9 @@ def calqlist(self, qmask=None, shape="circle"): nopr, bins = histogram(qind, bins=range(len(qradi) + 1)) return qind, pixellist, nopr, nopixels - ########################################################################### - ########for one_time correlation function for xyt frames - ################################################################## + # + # for one_time correlation function for xyt frames + # def autocor_xytframe(self, n): """Do correlation for one xyt frame--with data name as n""" @@ -932,7 +932,7 @@ def show(self, g2p, title): plt.show() -###################################################### +# if False: xp = xpcs() @@ -943,6 +943,6 @@ def show(self, g2p, title): g2 = xp.autocor(fnum) filename = "g2_-%s-" % (fnum) save(RES_DIR + FOUT + filename, g2) - ##g2= load(RES_DIR + FOUT + filename +'.npy') + # g2= load(RES_DIR + FOUT + filename +'.npy') g2p = xp.g2_to_pds(dly, g2, tscale=20) xp.show(g2p, "g2_run_%s" % fnum) diff --git a/pyCHX/v2/_futurepyCHX/Badpixels.py b/pyCHX/v2/_futurepyCHX/Badpixels.py index c90714a..f8daf6f 100644 --- a/pyCHX/v2/_futurepyCHX/Badpixels.py +++ b/pyCHX/v2/_futurepyCHX/Badpixels.py @@ -89,11 +89,11 @@ 4155535, ] ), # 57 points, coralpor - "6cc34a": np.array([1058942, 2105743, 2105744, 2107813, 2107815, 2109883, 4155535]), # coralpor + "6cc34a": np.array([1058942, 2105743, 2105744, 2107813, 2107815, 2109883, 4155535]), # coralpor } -## Create during 2018 Cycle 1 +# Create during 2018 Cycle 1 BadPix_4M = np.array( [ 828861, diff --git a/pyCHX/v2/_futurepyCHX/Create_Report.py b/pyCHX/v2/_futurepyCHX/Create_Report.py index 4c7a560..34dca8f 100644 --- a/pyCHX/v2/_futurepyCHX/Create_Report.py +++ b/pyCHX/v2/_futurepyCHX/Create_Report.py @@ -67,8 +67,8 @@ def add_image_string( height = img_height width = height / ratio # if width>400: - # width = 350 - # height = width*ratio + # width = 350 + # height = width*ratio c.drawImage(image, img_left, img_top, width=width, height=height, mask=None) c.setFont("Helvetica", 16) @@ -222,7 +222,7 @@ def load_metadata(self): else: uid_TwoTime = uid + "_fra_%s_%s" % (beg_TwoTime, end_TwoTime) - except: + except Exception: uid_ = uid uid_OneTime = uid if beg is None: @@ -475,7 +475,7 @@ def report_meta(self, top=740, new_page=False): md = self.md try: uid = md["uid"] - except: + except Exception: uid = self.uid # add sub-title, metadata c.setFont("Helvetica", 20) @@ -513,25 +513,25 @@ def report_meta(self, top=740, new_page=False): try: # try exp time from detector exposuretime = md["count_time"] # exposure time in sec - except: + except 
Exception: exposuretime = md["cam_acquire_time"] # exposure time in sec try: # try acq time from detector acquisition_period = md["frame_time"] - except: + except Exception: try: acquisition_period = md["acquire period"] - except: + except Exception: uid = md["uid"] acquisition_period = float(db[uid]["start"]["acquire period"]) s = [] - s.append("UID: %s" % uid) ###line 1, for uid - s.append("Sample: %s" % md["sample"]) ####line 2 sample + s.append("UID: %s" % uid) #line 1, for uid + s.append("Sample: %s" % md["sample"]) #line 2 sample s.append( "Data Acquisition From: %s To: %s" % (md["start_time"], md["stop_time"]) - ) ####line 3 Data Acquisition time - s.append("Measurement: %s" % md["Measurement"]) ####line 4 'Measurement + ) #line 3 Data Acquisition time + s.append("Measurement: %s" % md["Measurement"]) #line 4 'Measurement # print( md['incident_wavelength'], int(md['number of images']), md['detector_distance'], md['feedback_x'], md['feedback_y'], md['shutter mode'] ) # print(acquisition_period) @@ -543,7 +543,7 @@ def report_meta(self, top=740, new_page=False): round(float(exposuretime) * 1000, 4), round(float(acquisition_period) * 1000, 4), ) - ) ####line 5 'lamda... + ) #line 5 'lamda... s.append( "Detector-Sample Distance: %s m| FeedBack Mode: x -> %s & y -> %s| Shutter Mode: %s" @@ -553,7 +553,7 @@ def report_meta(self, top=740, new_page=False): md["feedback_y"], md["shutter mode"], ) - ) ####line 6 'Detector-Sample Distance.. + ) #line 6 'Detector-Sample Distance.. if self.report_type == "saxs": s7 = "Beam Center: [%s, %s] (pixel)" % ( md["beam_center_x"], @@ -575,22 +575,22 @@ def report_meta(self, top=740, new_page=False): s7 += " || " + "BadLen: %s" % len(md["bad_frame_list"]) s7 += " || " + "Transmission: %s" % md["transmission"] - s.append(s7) ####line 7 'Beam center... + s.append(s7) #line 7 'Beam center... 
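(The s list assembled here is drawn line by line on the reportlab canvas; a standalone sketch of that pattern, with illustrative filename, font size, and spacing rather than the class's actual layout:)

# Minimal reportlab sketch: draw a list of metadata strings one per line,
# stepping down the page as report_meta does. Layout values are
# illustrative assumptions, not the values used by this class.
from reportlab.lib.pagesizes import letter
from reportlab.pdfgen import canvas

c = canvas.Canvas("meta_demo.pdf", pagesize=letter)
c.setFont("Helvetica", 12)
top = 740
s = ["UID: example", "Sample: example sample", "Measurement: example scan"]
for n, text in enumerate(s, start=1):
    c.drawString(40, top - 20 * n, text)  # one metadata line per row
c.save()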
m = "Mask file: %s" % md["mask_file"] + " || " + "ROI mask file: %s" % md["roi_mask_file"] - # s.append( 'Mask file: %s'%md['mask_file'] ) ####line 8 mask filename - # s.append( ) ####line 8 mask filename + # s.append( 'Mask file: %s'%md['mask_file'] ) #line 8 mask filename + # s.append( ) #line 8 mask filename s.append(m) if self.res_h5_filename is not None: self.data_dir_ = self.data_dir + self.res_h5_filename else: self.data_dir_ = self.data_dir - s.append("Analysis Results Dir: %s" % self.data_dir_) ####line 9 results folder + s.append("Analysis Results Dir: %s" % self.data_dir_) #line 9 results folder - s.append("Metadata Dir: %s.csv-&.pkl" % self.metafile) ####line 10 metadata folder + s.append("Metadata Dir: %s.csv-&.pkl" % self.metafile) #line 10 metadata folder try: - s.append("Pipeline notebook: %s" % md["NOTEBOOK_FULL_PATH"]) ####line 11 notebook folder - except: + s.append("Pipeline notebook: %s" % md["NOTEBOOK_FULL_PATH"]) #line 11 notebook folder + except Exception: pass # print( 'here' ) line = 1 @@ -1812,7 +1812,7 @@ def save_res_h5(full_uid, data_dir, save_two_time=False): for key in md.keys(): try: meta_data.attrs[key] = md[key] - except: + except Exception: pass shapes = md["avg_img"].shape @@ -1912,7 +1912,7 @@ def make_pdf_report( c.report_static(top=540, iq_fit=run_fit_form) c.report_ROI(top=290) page = 1 - ##Page Two for plot OVAS images if oavs_report is True + # Page Two for plot OVAS images if oavs_report is True if oavs_report: c.new_page() c.report_header(page=2) @@ -1980,8 +1980,8 @@ def make_pdf_report( return c -###################################### -###Deal with saving dict to hdf5 file +# +# Deal with saving dict to hdf5 file def save_dict_to_hdf5(dic, filename): """ .... @@ -2027,7 +2027,7 @@ def recursively_save_dict_contents_to_group(h5file, path, dic): elif isinstance(item, np.ndarray): try: h5file[path + key] = item - except: + except Exception: item = np.array(item).astype("|S9") h5file[path + key] = item if not np.array_equal(h5file[path + key].value, item): @@ -2078,13 +2078,13 @@ def export_xpcs_results_to_h5(filename, export_dir, export_dict): for key_ in md.keys(): try: meta_data.attrs[str(key_)] = md[key_] - except: + except Exception: pass elif key in dict_nest: # print(key) try: recursively_save_dict_contents_to_group(hf, "/%s/" % key, export_dict[key]) - except: + except Exception: print("Can't export the key: %s in this dataset." 
% key) elif key in [ @@ -2100,7 +2100,7 @@ def export_xpcs_results_to_h5(filename, export_dir, export_dict): key=key, mode="a", ) - except: + except Exception: flag = True else: data = hf.create_dataset(key, data=export_dict[key]) @@ -2185,7 +2185,7 @@ def extract_xpcs_results_from_h5_debug(filename, import_dir, onekey=None, exclud try: with h5py.File(fp, "r") as hf: extract_dict[onekey] = np.array(hf.get(onekey)) - except: + except Exception: print("The %s dosen't have this %s value" % (fp, onekey)) return extract_dict @@ -2213,7 +2213,7 @@ def export_xpcs_results_to_h5_old(filename, export_dir, export_dict): for key_ in md.keys(): try: meta_data.attrs[str(key_)] = md[key_] - except: + except Exception: pass elif key in dict_nest: k1 = export_dict[key] @@ -2320,7 +2320,7 @@ def extract_xpcs_results_from_h5(filename, import_dir, onekey=None, exclude_keys else: extract_dict[key] = hf.get(key)[:] # np.array( hf.get( key )) # extract_dict[onekey] = hf.get( key )[:] #np.array( hf.get( onekey )) - except: + except Exception: print("The %s dosen't have this %s value" % (fp, onekey)) return extract_dict diff --git a/pyCHX/v2/_futurepyCHX/DEVs.py b/pyCHX/v2/_futurepyCHX/DEVs.py index 19fd4e5..ad7688b 100644 --- a/pyCHX/v2/_futurepyCHX/DEVs.py +++ b/pyCHX/v2/_futurepyCHX/DEVs.py @@ -4,6 +4,8 @@ import skbeam.core.roi as roi from numpy.fft import fft, ifft from tqdm import tqdm +import matplotlib.pyplot as plt +from scipy.optimize import leastsq def fit_one_peak_curve(x, y, fit_range): @@ -76,7 +78,7 @@ def plot_xy_with_fit( return ax -#############For APD detector +# For APD detector def get_pix_g2_fft(time_inten): """YG Dev@CHX 2018/12/4 get g2 for oneD intensity g2 = G/(P*F) @@ -134,7 +136,7 @@ def get_pix_g2_PF(time_inten): return P, F -################### +# def get_ab_correlation(a, b): @@ -207,9 +209,9 @@ def auto_correlation_fft_padding_zeros(a, axis=-1): Based on auto_cor(arr) = ifft( fft( arr ) * fft(arr[::-1]) ) In numpy form auto_cor(arr) = ifft( - fft( arr, n=2N-1, axis=axis ) ##padding enough zeros - ## for axis - * np.conjugate( ## conju for reverse array + fft( arr, n=2N-1, axis=axis ) #padding enough zeros + # for axis + * np.conjugate( # conju for reverse array fft(arr , n=2N-1, axis=axis) ) ) #do reverse fft Input: @@ -251,9 +253,9 @@ def auto_correlation_fft(a, axis=-1): Based on auto_cor(arr) = ifft( fft( arr ) * fft(arr[::-1]) ) In numpy form auto_cor(arr) = ifft( - fft( arr, n=2N-1, axis=axis ) ##padding enough zeros - ## for axis - * np.conjugate( ## conju for reverse array + fft( arr, n=2N-1, axis=axis ) #padding enough zeros + # for axis + * np.conjugate( # conju for reverse array fft(arr , n=2N-1, axis=axis) ) ) #do reverse fft Input: @@ -286,7 +288,7 @@ def multitau(Ipix, bind, lvl=12, nobuf=8): plot(tt[1:],g2[1:,i]) will plot each g2. 
""" # if num_lev is None: - # num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 + # num_lev = int(np.log( noframes/(num_buf-1))/np.log(2) +1) +1 # print(nobuf,nolvl) nobins = bind.max() + 1 nobufov2 = nobuf // 2 @@ -345,7 +347,7 @@ def average_array_withNan(array, axis=0, mask=None): array_ = np.ma.masked_array(array, mask=mask) try: sums = np.array(np.ma.sum(array_[:, :], axis=axis)) - except: + except Exception: sums = np.array(np.ma.sum(array_[:], axis=axis)) cts = np.sum(~mask, axis=axis) @@ -412,8 +414,8 @@ def autocor_for_pix_time(pix_time_data, dly_dict, pixel_norm=None, frame_norm=No # IF_mask = mask_pix[tau: Nt,: ] # IPF_mask = IP_mask | IF_mask # IPFm = average_array_withNan(IP*IF, axis = 0, )#mask= IPF_mask ) - # IPm = average_array_withNan(IP, axis = 0, )# mask= IP_mask ) - # IFm = average_array_withNan(IF, axis = 0 , )# mask= IF_mask ) + # IPm = average_array_withNan(IP, axis = 0, )# mask= IP_mask ) + # IFm = average_array_withNan(IF, axis = 0 , )# mask= IF_mask ) G2[tau_ind] = average_array_withNan( IP * IF, axis=0, @@ -428,8 +430,8 @@ def autocor_for_pix_time(pix_time_data, dly_dict, pixel_norm=None, frame_norm=No ) # IFm tau_ind += 1 # for i in range(G2.shape[0]-1, 0, -1): - # if np.isnan(G2[i,0]): - # gmax = i + # if np.isnan(G2[i,0]): + # gmax = i gmax = tau_ind return G2[:gmax, :], Gp[:gmax, :], Gf[:gmax, :] @@ -447,11 +449,8 @@ def autocor_xytframe(self, n): return crl / (IP * IF) * FN -###################For Fit +# For Fit -import matplotlib.pyplot as plt -import numpy as np -from scipy.optimize import leastsq # duplicate my curfit function from yorick, except use sigma and not w # notice the main feature is an adjust list. @@ -493,12 +492,12 @@ def _residuals(p, x, y, sigy, pall, adj, fun): def fitpr(chisq, a, sigmaa, title=None, lbl=None): """nicely print out results of a fit""" # get fitted results. - if lbl == None: + if lbl is None: lbl = [] for i in xrange(a.size): lbl.append("A%(#)02d" % {"#": i}) # print resuls of a fit. - if title != None: + if title is not None: print(title) print(" chisq=%(c).4f" % {"c": chisq}) for i in range(a.size): @@ -529,7 +528,7 @@ def Gaussian(x, p): return g -###########For ellipse shaped sectors by users +# For ellipse shaped sectors by users def elps_r(a, b, theta): """ Returns the radius of an ellipse with semimajor/minor axes a/b diff --git a/pyCHX/v2/_futurepyCHX/DataGonio.py b/pyCHX/v2/_futurepyCHX/DataGonio.py index 18bf602..9082596 100644 --- a/pyCHX/v2/_futurepyCHX/DataGonio.py +++ b/pyCHX/v2/_futurepyCHX/DataGonio.py @@ -232,7 +232,7 @@ def convert_Qmap_old(img, qx_map, qy_map=None, bins=None, rangeq=None): # Mask -################################################################################ +# class Mask(object): """Stores the matrix of pixels to be excluded from further analysis.""" @@ -301,11 +301,11 @@ def invert(self): self.data = -1 * (self.data - 1) # End class Mask(object) - ######################################## + # # Calibration -################################################################################ +# class Calibration(object): """Stores aspects of the experimental setup; especially the calibration parameters for a particular detector. 
That is, the wavelength, detector @@ -326,7 +326,7 @@ def __init__(self, wavelength_A=None, distance_m=None, pixel_size_um=None): self.clear_maps() # Experimental parameters - ######################################## + # def set_wavelength(self, wavelength_A): """Set the experimental x-ray wavelength (in Angstroms).""" @@ -422,7 +422,7 @@ def get_q_per_pixel(self): return self.q_per_pixel # Maps - ######################################## + # def clear_maps(self): self.r_map_data = None @@ -544,11 +544,11 @@ def _generate_qxyz_maps(self): ) # End class Calibration(object) - ######################################## + # # CalibrationGonio -################################################################################ +# class CalibrationGonio(Calibration): """ The geometric claculations used here are described: @@ -557,7 +557,7 @@ class CalibrationGonio(Calibration): """ # Experimental parameters - ######################################## + # def set_angles( self, @@ -673,7 +673,7 @@ def get_ratioDw(self): return self.distance_m / (width_mm / 1000.0) # Maps - ######################################## + # def q_map(self): if self.q_map_data is None: diff --git a/pyCHX/v2/_futurepyCHX/SAXS.py b/pyCHX/v2/_futurepyCHX/SAXS.py index e08b8f5..7837dbe 100644 --- a/pyCHX/v2/_futurepyCHX/SAXS.py +++ b/pyCHX/v2/_futurepyCHX/SAXS.py @@ -133,7 +133,7 @@ def find_index_old(x, x0, tolerance=None): N = len(x) i = 0 position = None - if tolerance == None: + if tolerance is None: tolerance = (x[1] - x[0]) / 2.0 if x0 > max(x): position = len(x) - 1 @@ -489,7 +489,7 @@ def get_form_factor_fit2( # print(q4_bg) # resL = leastsq( fit_funcs, [ p ], args=( iq_, q_, num_points, spread, fit_func, function ), - # full_output=1, ftol=1.49012e-38, xtol=1.49012e-10, factor=100) + # full_output=1, ftol=1.49012e-38, xtol=1.49012e-10, factor=100) # radius, sigma, delta_rho, background = np.abs(pfit) if not q4_bg: @@ -531,7 +531,7 @@ def get_form_factor_fit2( for i in range(len(pfit)): try: error.append(np.absolute(pcov[i][i]) ** 0.5) - except: + except Exception: error.append(None) pfit_leastsq = pfit perr_leastsq = np.array(error) @@ -944,7 +944,7 @@ def show_saxs_qmap( if w < minW: img_ = img[cx - w // 2 : cx + w // 2, cy + w // 2 : cy + w // 2] # elif w > maxW: - # img_[ cx-w//2:cx+w//2, cy+w//2:cy+w//2 ] = + # img_[ cx-w//2:cx+w//2, cy+w//2:cy+w//2 ] = ROI = [ max(0, center[0] - w), @@ -1013,12 +1013,12 @@ def show_saxs_qmap( return ax -######################## -##Fit sphere by scipy.leastsq fit +# +# Fit sphere by scipy.leastsq fit def fit_sphere_form_factor_func(parameters, ydata, xdata, yerror=None, nonvariables=None): - """##Develop by YG at July 28, 2017 @CHX + """#Develop by YG at July 28, 2017 @CHX This function is for fitting form factor of polyderse spherical particles by using scipy.leastsq fit radius, sigma, delta_rho, background = parameters @@ -1044,7 +1044,7 @@ def fit_sphere_form_factor_by_leastsq( pq, fit_range=None, ): - """##Develop by YG at July 28, 2017 @CHX + """#Develop by YG at July 28, 2017 @CHX Fitting form factor of polyderse spherical particles by using scipy.leastsq fit Input: radius, sigma, delta_rho, background = p0 @@ -1070,7 +1070,7 @@ def fit_sphere_form_factor_by_leastsq( def plot_fit_sphere_form_factor(q, pq, res, p0=None, xlim=None, ylim=None): - """##Develop by YG at July 28, 2017 @CHX""" + """#Develop by YG at July 28, 2017 @CHX""" if p0 is not None: radius, sigma, delta_rho, background = p0 diff --git a/pyCHX/v2/_futurepyCHX/Stitching.py b/pyCHX/v2/_futurepyCHX/Stitching.py index 
da5291f..ecb42c3 100644 --- a/pyCHX/v2/_futurepyCHX/Stitching.py +++ b/pyCHX/v2/_futurepyCHX/Stitching.py @@ -95,7 +95,7 @@ def Correct_Overlap_Images_Intensities( fig = plt.figure()# figsize=[2,8]) for i in range(len(infiles)): - #print(i) + # print(i) ax = fig.add_subplot(1,8, i+1) d = process.load( infiles[i] ) show_img( dataM[i], logs = True, show_colorbar= False,show_ticks =False, @@ -188,14 +188,14 @@ def stitch_WAXS_in_Qspace(dataM, phis, calibration, dx=0, dy=22, dz=0, dq=0.015, phi_offset=4.649, phi_start=1.0, phi_spacing=5.0,) for infile in infiles] ) # For TWD data calibration = CalibrationGonio(wavelength_A=0.619920987) # 20.0 keV - #calibration.set_image_size( data.shape[1], data.shape[0] ) + # calibration.set_image_size( data.shape[1], data.shape[0] ) calibration.set_image_size(195, height=1475) # Pilatus300kW vertical calibration.set_pixel_size(pixel_size_um=172.0) calibration.set_beam_position(97.0, 1314.0) calibration.set_distance(0.275) Intensity_map, qxs, qzs = stitch_WAXS_in_Qspace( dataM, phis, calibration) - #Get center of the qmap + # Get center of the qmap bx,by = np.argmin( np.abs(qxs) ), np.argmin( np.abs(qzs) ) print( bx, by ) @@ -329,7 +329,7 @@ def get_phi(filename, phi_offset=0, phi_start=4.5, phi_spacing=4.0, polarity=-1, return phi_c -############For CHX beamline +# For CHX beamline def get_qmap_qxyz_range( @@ -430,14 +430,14 @@ def stitch_WAXS_in_Qspace_CHX( phi_offset=4.649, phi_start=1.0, phi_spacing=5.0,) for infile in infiles] ) # For TWD data calibration = CalibrationGonio(wavelength_A=0.619920987) # 20.0 keV - #calibration.set_image_size( data.shape[1], data.shape[0] ) + # calibration.set_image_size( data.shape[1], data.shape[0] ) calibration.set_image_size(195, height=1475) # Pilatus300kW vertical calibration.set_pixel_size(pixel_size_um=172.0) calibration.set_beam_position(97.0, 1314.0) calibration.set_distance(0.275) Intensity_map, qxs, qzs = stitch_WAXS_in_Qspace( dataM, phis, calibration) - #Get center of the qmap + # Get center of the qmap bx,by = np.argmin( np.abs(qxs) ), np.argmin( np.abs(qzs) ) print( bx, by ) """ diff --git a/pyCHX/v2/_futurepyCHX/Two_Time_Correlation_Function.py b/pyCHX/v2/_futurepyCHX/Two_Time_Correlation_Function.py index b3d7899..fe4aa6f 100644 --- a/pyCHX/v2/_futurepyCHX/Two_Time_Correlation_Function.py +++ b/pyCHX/v2/_futurepyCHX/Two_Time_Correlation_Function.py @@ -1,8 +1,8 @@ -###################################################################################### -########Dec 16, 2015, Yugang Zhang, yuzhang@bnl.gov, CHX, NSLS-II, BNL################ -########Time correlation function, include one-time, two-time, four-time############## -########Muli-tau method, array-operation method####################################### -###################################################################################### +# +# Dec 16, 2015, Yugang Zhang, yuzhang@bnl.gov, CHX, NSLS-II, BNL# +# Time correlation function, include one-time, two-time, four-time# +# Muli-tau method, array-operation method# +# import itertools @@ -71,7 +71,7 @@ def __init__(self, indexable, pixelist): # self.shape = indexable.shape try: self.length = len(indexable) - except: + except Exception: self.length = indexable.length def get_data(self): @@ -95,7 +95,7 @@ def __init__(self, indexable, mask): self.mask = mask try: self.shape = indexable.shape - except: + except Exception: # if self.shape = [len(indexable), indexable[0].shape[0], indexable[0].shape[1]] # self.shape = indexable.shape @@ -252,10 +252,10 @@ def auto_two_Array(data, rois, 
data_pixel=None): g12b[:, :, qi - 1] = np.dot(data_pixel_qi, data_pixel_qi.T) / sum1 / sum2 / nopr[qi - 1] # print ( proi, int( qi //( Unitq) ) ) - # if int( qi //( Unitq) ) == proi: - # sys.stdout.write("#") - # sys.stdout.flush() - # proi += 1 + # if int( qi //( Unitq) ) == proi: + # sys.stdout.write("#") + # sys.stdout.flush() + # proi += 1 elapsed_time = time.time() - start_time print("Total time: %.2f min" % (elapsed_time / 60.0)) @@ -263,14 +263,14 @@ def auto_two_Array(data, rois, data_pixel=None): return g12b -#################################### -##Derivation of Two time correlation -##################################### +# +# Derivation of Two time correlation +# -##################################### +# # get one-time @different age -##################################### +# def get_qedge2(qstart, qend, qwidth, noqs, return_int=False): @@ -438,7 +438,7 @@ def get_aged_g2_from_g12q(g12q, age_edge, age_center=None, timeperframe=1, time_ arr = rotate_g12q_to_rectangle(g12q) m, n = arr.shape # m should be 2*n-1 # age_edge, age_center = get_qedge( qstart=slice_start,qend= slice_end, - # qwidth = slice_width, noqs =slice_num ) + # qwidth = slice_width, noqs =slice_num ) # print(arr.shape) age_edge = np.int_(age_edge) if age_center is None: @@ -739,7 +739,7 @@ def plot_aged_g2(g2_aged, tau=None, timeperframe=1, ylim=None, xlim=None): ax.set_ylim(xlim) -##################################### +# # get fout-time @@ -924,9 +924,9 @@ def histogram_taus(taus, hisbin=20, plot=True, timeperframe=1): return his -##################################### +# # get one-time -##################################### +# def get_one_time_from_two_time_old(g12, norms=None, nopr=None): @@ -1046,7 +1046,7 @@ def get_four_time_from_two_time(g12, g2=None, rois=None): return g4f12 -###### +# def make_g12_mask(badframes_list, g12_shape): """ Dec 16, 2015, Y.G.@CHX @@ -1263,7 +1263,7 @@ def show_C12( else: timeperframe = 1 - if "timeoffset" in kwargs.keys(): ### added timeoffset here + if "timeoffset" in kwargs.keys(): # added timeoffset here timeoffset = kwargs["timeoffset"] else: timeoffset = 0 @@ -1306,7 +1306,7 @@ def show_C12( fig, ax = fig_ax # extent=[0, data.shape[0]*timeperframe, 0, data.shape[0]*timeperframe ] - extent = np.array([N1, N2, N1, N2]) * timeperframe + timeoffset ### added timeoffset to extend + extent = np.array([N1, N2, N1, N2]) * timeperframe + timeoffset # added timeoffset to extend if logs: im = imshow( diff --git a/pyCHX/v2/_futurepyCHX/XPCS_GiSAXS.py b/pyCHX/v2/_futurepyCHX/XPCS_GiSAXS.py index 8c57ff8..dd19155 100644 --- a/pyCHX/v2/_futurepyCHX/XPCS_GiSAXS.py +++ b/pyCHX/v2/_futurepyCHX/XPCS_GiSAXS.py @@ -87,8 +87,8 @@ def get_gisaxs_roi(Qr, Qz, qr_map, qz_map, mask=None, qval_dict=None): return roi_mask, qval_dict -############ -##developed at Octo 11, 2016 +# +# developed at Octo 11, 2016 def get_qr(data, Qr, Qz, qr, qz, mask=None): """Octo 12, 2016, Y.G.@CHX plot one-d of I(q) as a function of qr for different qz @@ -102,12 +102,12 @@ def get_qr(data, Qr, Qz, qr, qz, mask=None): Return: qr_1d, a dataframe, with columns as qr1, qz1 (float value), qr2, qz2,.... 
Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -159,9 +159,9 @@ def get_qr(data, Qr, Qz, qr, qz, mask=None): return df -######################## +# # get one-d of I(q) as a function of qr for different qz -##################### +# def cal_1d_qr( @@ -194,14 +194,14 @@ def cal_1d_qr( Plot 1D cureve as a function of Qr for each Qz Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -433,9 +433,9 @@ def plot_t_qrc(qr_1d, frame_edge, save=False, pargs=None, fontsize=8, *argv, **k ) -########################################## -###Functions for GiSAXS -########################################## +# +# Functions for GiSAXS +# def make_gisaxs_grid(qr_w=10, qz_w=12, dim_r=100, dim_z=120): @@ -453,9 +453,9 @@ def make_gisaxs_grid(qr_w=10, qz_w=12, dim_r=100, dim_z=120): return y -########################################### +# # for Q-map, convert pixel to Q -########################################### +# def convert_Qmap(img, qx_map, qy_map=None, bins=None, rangeq=None, mask=None, statistic="sum"): @@ -693,9 +693,9 @@ def get_qedge2( return qedge, qcenter -########################################### +# # for plot Q-map -########################################### +# def get_qmap_label(qmap, qedge): @@ -860,14 +860,14 @@ def get_1d_qr( Examples: - #to make two-qz, from 0.018 to 0.046, width as 0.008, + # to make two-qz, from 0.018 to 0.046, width as 0.008, qz_width = 0.008 qz_start = 0.018 + qz_width/2 qz_end = 0.046 - qz_width/2 qz_num= 2 - #to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 + # to make one-qr, from 0.02 to 0.1, and the width is 0.1-0.012 qr_width = 0.1-0.02 qr_start = 0.02 + qr_width /2 qr_end = 0.01 - qr_width /2 @@ -1084,14 +1084,14 @@ def get_qr_tick_label(qr, label_array_qr, inc_x0, interp=True): w = np.where(rticks <= inc_x0)[0] rticks1 = np.int_(np.interp(np.round(rticks_label[w], 3), rticks_label[w], rticks[w])) rticks_label1 = np.round(rticks_label[w], 3) - except: + except Exception: rticks_label1 = [] try: w = np.where(rticks > inc_x0)[0] rticks2 = np.int_(np.interp(np.round(rticks_label[w], 3), rticks_label[w], rticks[w])) rticks = np.append(rticks1, rticks2) rticks_label2 = np.round(rticks_label[w], 3) - except: + except Exception: rticks_label2 = [] rticks_label = np.append(rticks_label1, rticks_label2) @@ -1176,7 +1176,7 @@ def get_qzr_map(qr, qz, inc_x0, Nzline=10, Nrline=10, interp=True, return_qrz_la # rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0) try: rticks, rticks_label = zip(*np.sort(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) - except: + except Exception: rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) # stride = int(len(zticks)/10) ticks = [zticks, zticks_label, rticks, rticks_label] @@ -1352,7 +1352,7 @@ def show_qzr_map(qr, qz, 
inc_x0, data=None, Nzline=10, Nrline=10, interp=True, * # rticks,rticks_label = get_qr_tick_label(label_array_qr,inc_x0) try: rticks, rticks_label = zip(*np.sort(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) - except: + except Exception: rticks, rticks_label = zip(*sorted(zip(*get_qr_tick_label(qr, label_array_qr, inc_x0, interp=interp)))) # stride = int(len(zticks)/10) @@ -1821,7 +1821,7 @@ def save_gisaxs_g2(g2, res_pargs, time_label=False, taus=None, filename=None, *a try: qz_center = res_pargs["qz_center"] qr_center = res_pargs["qr_center"] - except: + except Exception: roi_label = res_pargs["roi_label"] path = res_pargs["path"] @@ -1835,7 +1835,7 @@ def save_gisaxs_g2(g2, res_pargs, time_label=False, taus=None, filename=None, *a for qz in qz_center: for qr in qr_center: columns.append([str(qz), str(qr)]) - except: + except Exception: columns.append([v for (k, v) in roi_label.items()]) df.columns = columns @@ -1914,10 +1914,10 @@ def fit_gisaxs_g2(g2, res_pargs, function="simple_exponential", one_plot=False, # uid=res_pargs['uid'] num_rings = g2.shape[1] - beta = np.zeros(num_rings) # contrast factor - rate = np.zeros(num_rings) # relaxation rate - alpha = np.zeros(num_rings) # alpha - baseline = np.zeros(num_rings) # baseline + beta = np.zeros(num_rings) # contrast factor + rate = np.zeros(num_rings) # relaxation rate + alpha = np.zeros(num_rings) # alpha + baseline = np.zeros(num_rings) # baseline if function == "simple_exponential" or function == "simple": _vars = np.unique(_vars + ["alpha"]) @@ -2141,7 +2141,7 @@ def fit_gisaxs_g2(g2, res_pargs, function="simple_exponential", one_plot=False, # GiSAXS End -############################### +# def get_each_box_mean_intensity(data_series, box_mask, sampling, timeperframe, plot_=True, *argv, **kwargs): @@ -2154,7 +2154,7 @@ def get_each_box_mean_intensity(data_series, box_mask, sampling, timeperframe, p mean_int_sets, index_list = roi.mean_intensity(np.array(data_series[::sampling]), box_mask) try: N = len(data_series) - except: + except Exception: N = data_series.length times = np.arange(N) * timeperframe # get the time for each frame num_rings = len(np.unique(box_mask)[1:]) @@ -2232,7 +2232,7 @@ def fit_qr_qz_rate(qr, qz, rate, plot_=True, *argv, **kwargs): for i, qz_ in enumerate(qz): try: y = np.array(rate["rate"][i * Nqr : (i + 1) * Nqr]) - except: + except Exception: y = np.array(rate[i * Nqr : (i + 1) * Nqr]) # print( len(x), len(y) ) @@ -2465,7 +2465,7 @@ def multi_uids_gisaxs_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2498,7 +2498,7 @@ def multi_uids_gisaxs_xpcs_analysis( md["Measurement"] = db[uid]["start"]["Measurement"] # md['sample']=db[uid]['start']['sample'] # print( md['Measurement'] ) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" @@ -2510,7 +2510,7 @@ def multi_uids_gisaxs_xpcs_analysis( acquisition_period = md["frame_time"] timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... 
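(A compact sketch of the exposure/acquisition-period fallback used throughout these scripts before timeperframe is packed into setup_pargs; md below is an illustrative dict, not real run metadata:)

# Prefer the detector-level count_time/frame_time, else fall back to the
# plan-level keys, as the surrounding code does; values are illustrative.
md = {"cam_acquire_time": 0.00134, "frame_time": 0.00134}
exposuretime = md.get("count_time", md.get("cam_acquire_time"))  # in sec
try:
    acquisition_period = md["frame_time"]
except KeyError:
    acquisition_period = md["acquire period"]
timeperframe = acquisition_period  # seconds per frame, sets the g2 lag scale
print(exposuretime, timeperframe)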
setup_pargs = dict( uid=uid, dpix=dpix, Ldet=Ldet, lambda_=lambda_, timeperframe=timeperframe, path=data_dir ) diff --git a/pyCHX/v2/_futurepyCHX/XPCS_SAXS.py b/pyCHX/v2/_futurepyCHX/XPCS_SAXS.py index c59f6cc..ae5aab8 100644 --- a/pyCHX/v2/_futurepyCHX/XPCS_SAXS.py +++ b/pyCHX/v2/_futurepyCHX/XPCS_SAXS.py @@ -378,15 +378,15 @@ def circular_average( image_mask = np.ravel(image) # if nx is None: #make a one-pixel width q - # nx = int( max_r - min_r) + # nx = int( max_r - min_r) # if min_x is None: - # min_x= int( np.min( binr)) - # min_x_= int( np.min( binr)/(np.sqrt(pixel_size[1]*pixel_size[0] ))) + # min_x= int( np.min( binr)) + # min_x_= int( np.min( binr)/(np.sqrt(pixel_size[1]*pixel_size[0] ))) # if max_x is None: - # max_x = int( np.max(binr )) - # max_x_ = int( np.max(binr)/(np.sqrt(pixel_size[1]*pixel_size[0] )) ) + # max_x = int( np.max(binr )) + # max_x_ = int( np.max(binr)/(np.sqrt(pixel_size[1]*pixel_size[0] )) ) # if nx is None: - # nx = max_x_ - min_x_ + # nx = max_x_ - min_x_ # binr_ = np.int_( binr /(np.sqrt(pixel_size[1]*pixel_size[0] )) ) binr_ = binr / (np.sqrt(pixel_size[1] * pixel_size[0])) @@ -453,7 +453,7 @@ def get_circular_average( avg_img, center, threshold=0, nx=nx, pixel_size=(dpix, dpix), mask=mask, min_x=min_x, max_x=max_x ) qp_ = qp * dpix - # convert bin_centers from r [um] to two_theta and then to q [1/px] (reciprocal space) + # convert bin_centers from r [um] to two_theta and then to q [1/px] (reciprocal space) two_theta = utils.radius_to_twotheta(Ldet, qp_) q = utils.twotheta_to_q(two_theta, lambda_) if plot_: @@ -1154,7 +1154,7 @@ def get_angular_mask( """ mask: 2D-array inner_angle # the starting angle in unit of degree - outer_angle # the ending angle in unit of degree + outer_angle # the ending angle in unit of degree width # width of each angle, in degree, default is None, there is no gap between the neighbour angle ROI edges: default, None. otherwise, give a customized angle edges num_angles # number of angles @@ -1240,7 +1240,7 @@ def get_angular_mask_old( """ mask: 2D-array inner_angle # the starting angle in unit of degree - outer_angle # the ending angle in unit of degree + outer_angle # the ending angle in unit of degree width # width of each angle, in degree, default is None, there is no gap between the neighbour angle ROI edges: default, None. 
otherwise, give a customized angle edges num_angles # number of angles @@ -1338,7 +1338,7 @@ def get_ring_mask( return_q_in_pixel=False, ): # def get_ring_mask( mask, inner_radius= 0.0020, outer_radius = 0.009, width = 0.0002, num_rings = 12, - # edges=None, unit='pixel',pargs=None ): + # edges=None, unit='pixel',pargs=None ): """ mask: 2D-array inner_radius #radius of the first ring @@ -1366,11 +1366,11 @@ def get_ring_mask( # qc = np.int_( np.linspace( inner_radius,outer_radius, num_rings ) ) # edges = np.zeros( [ len(qc), 2] ) # if width%2: - # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 +1 + # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 +1 # else: - # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 + # edges[:,0],edges[:,1] = qc - width//2, qc + width//2 - # find the edges of the required rings + # find the edges of the required rings if edges is None: if num_rings != 1: spacing = (outer_radius - inner_radius - num_rings * width) / (num_rings - 1) # spacing between rings @@ -1700,9 +1700,9 @@ def plot_saxs_rad_ang_g2(g2, taus, res_pargs=None, master_angle_plot=False, retu # title_qa = '%.2f'%( ang_center[sn]) + r'$^\circ$' + '( %d )'%(i) # if num_qr==1: - # title = 'uid= %s:--->'%uid + title_qr + '__' + title_qa + # title = 'uid= %s:--->'%uid + title_qr + '__' + title_qa # else: - # title = title_qa + # title = title_qa title = title_qa ax.set_title(title, y=1.1, fontsize=12) y = g2[:, i] @@ -1730,9 +1730,9 @@ def plot_saxs_rad_ang_g2(g2, taus, res_pargs=None, master_angle_plot=False, retu return fig -############################################ -##a good func to fit g2 for all types of geogmetries -############################################ +# +# a good func to fit g2 for all types of geogmetries +# def fit_saxs_rad_ang_g2( @@ -1756,8 +1756,8 @@ def fit_saxs_rad_ang_g2( 'streched_exponential': fit by a streched exponential function, defined as beta * (np.exp(-2 * relaxation_rate * lags))**alpha + baseline - #fit_vibration: - # if True, will fit the g2 by a dumped sin function due to beamline mechnical oscillation + # fit_vibration: + # if True, will fit the g2 by a dumped sin function due to beamline mechnical oscillation Returns ------- @@ -1801,14 +1801,14 @@ def fit_saxs_rad_ang_g2( print("Please give ang_center") num_rings = g2.shape[1] - beta = np.zeros(num_rings) # contrast factor - rate = np.zeros(num_rings) # relaxation rate - alpha = np.zeros(num_rings) # alpha - baseline = np.zeros(num_rings) # baseline + beta = np.zeros(num_rings) # contrast factor + rate = np.zeros(num_rings) # relaxation rate + alpha = np.zeros(num_rings) # alpha + baseline = np.zeros(num_rings) # baseline freq = np.zeros(num_rings) if function == "flow_para_function" or function == "flow_para": - flow = np.zeros(num_rings) # baseline + flow = np.zeros(num_rings) # baseline if "fit_variables" in kwargs: additional_var = kwargs["fit_variables"] _vars = [k for k in list(additional_var.keys()) if additional_var[k] is False] @@ -2116,7 +2116,7 @@ def multi_uids_saxs_flow_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector, reverse=True) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2166,7 +2166,7 @@ def multi_uids_saxs_flow_xpcs_analysis( # md['sample']= 'PS205000-PMMA-207000-SMMA3' print(md["Measurement"]) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" @@ -2177,7 +2177,7 @@ def multi_uids_saxs_flow_xpcs_analysis( acquisition_period = md["frame_time"] 
timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... center = md["center"] setup_pargs = dict( @@ -2192,7 +2192,7 @@ def multi_uids_saxs_flow_xpcs_analysis( md["avg_img"] = avg_img # plot1D( y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])], - # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) + # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) min_inten = 10 # good_start = np.where( np.array(imgsum) > min_inten )[0][0] @@ -2209,7 +2209,7 @@ def multi_uids_saxs_flow_xpcs_analysis( print("The good_end frame number is: %s " % good_end_) norm = None - ################### + # # Do correlaton here for nconf, seg_mask in enumerate([seg_mask_v, seg_mask_p]): @@ -2436,7 +2436,7 @@ def multi_uids_saxs_xpcs_analysis( try: detector = get_detector(db[uid]) imgs = load_data(uid, detector, reverse=True) - except: + except Exception: print("The %i--th uid: %s can not load data" % (i, uid)) imgs = 0 @@ -2485,7 +2485,7 @@ def multi_uids_saxs_xpcs_analysis( # md['sample']= 'PS205000-PMMA-207000-SMMA3' print(md["Measurement"]) - except: + except Exception: md["Measurement"] = "Measurement" md["sample"] = "sample" @@ -2496,7 +2496,7 @@ def multi_uids_saxs_xpcs_analysis( acquisition_period = md["frame_time"] timeperframe = acquisition_period # for g2 # timeperframe = exposuretime#for visiblitly - # timeperframe = 2 ## manual overwrite!!!! we apparently writing the wrong metadata.... + # timeperframe = 2 # manual overwrite!!!! we apparently writing the wrong metadata.... 
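The hunks above repeat one metadata-fallback idiom several times, and the patch consistently narrows its bare `except:` to `except Exception:`, so that KeyboardInterrupt and SystemExit still propagate. A compact sketch of the idiom, assuming `md` is the usual pyCHX metadata dict; the final databroker fallback in the real code is omitted here.

def get_timing(md):
    # exposure time in sec: prefer the detector's count_time
    try:
        exposuretime = md["count_time"]
    except Exception:
        exposuretime = md["cam_acquire_time"]
    # acquisition period in sec: prefer the detector's frame_time
    try:
        acquisition_period = md["frame_time"]
    except Exception:
        # the real code falls back to md['acquire period'] and finally to a
        # databroker lookup; this sketch simply reuses the exposure time
        acquisition_period = exposuretime
    return float(exposuretime), float(acquisition_period)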
center = md["center"] setup_pargs = dict( @@ -2511,7 +2511,7 @@ def multi_uids_saxs_xpcs_analysis( md["avg_img"] = avg_img # plot1D( y = imgsum[ np.array( [i for i in np.arange( len(imgsum)) if i not in bad_frame_list])], - # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) + # title ='Uid= %s--imgsum'%uid, xlabel='Frame', ylabel='Total_Intensity', legend='' ) min_inten = 10 # good_start = np.where( np.array(imgsum) > min_inten )[0][0] @@ -2658,7 +2658,7 @@ def plot_mul_g2(g2s, md): # print ( len_tau, len(y)) # ax.semilogx(taus[1:len_], y[1:len_], marker = '%s'%next(markers_), color='%s'%next(colors_), - # markersize=6, label = '%s'%sid) + # markersize=6, label = '%s'%sid) ax.semilogx( taus[1:len_], y[1:len_], marker=markers[i], color=colors[i], markersize=6, label="%s" % sid diff --git a/pyCHX/v2/_futurepyCHX/XPCS_XSVS_SAXS_Multi_2017_V4.py b/pyCHX/v2/_futurepyCHX/XPCS_XSVS_SAXS_Multi_2017_V4.py index 98907ef..07b3592 100644 --- a/pyCHX/v2/_futurepyCHX/XPCS_XSVS_SAXS_Multi_2017_V4.py +++ b/pyCHX/v2/_futurepyCHX/XPCS_XSVS_SAXS_Multi_2017_V4.py @@ -24,10 +24,10 @@ def XPCS_XSVS_SAXS_Multi( run_two_time = run_pargs["run_two_time"] run_four_time = run_pargs["run_four_time"] run_xsvs = run_pargs["run_xsvs"] - ############################################################### + # if scat_geometry != "saxs": # to be done for other types run_xsvs = False - ############################################################### + # att_pdf_report = run_pargs["att_pdf_report"] show_plot = run_pargs["show_plot"] CYCLE = run_pargs["CYCLE"] @@ -66,7 +66,7 @@ def XPCS_XSVS_SAXS_Multi( data_dir_ = data_dir uid_ = uid_average - ### For Load results + # For Load results multi_res = {} for uid, fuid in zip(guids, fuids): @@ -526,7 +526,7 @@ def XPCS_XSVS_SAXS_Multi( export_xpcs_results_to_h5(uid + "_Res.h5", data_dir, export_dict=Exdt) # extract_dict = extract_xpcs_results_from_h5( filename = uid + '_Res.h5', import_dir = data_dir ) - ## Create PDF report for each uid + # Create PDF report for each uid pdf_out_dir = data_dir pdf_filename = "XPCS_Analysis_Report_for_%s%s.pdf" % (uid_average, pdf_version) if run_xsvs: @@ -549,7 +549,7 @@ def XPCS_XSVS_SAXS_Multi( run_xsvs, report_type=scat_geometry, ) - ### Attach each g2 result to the corresponding olog entry + # Attach each g2 result to the corresponding olog entry if att_pdf_report: os.environ["HTTPS_PROXY"] = "https://proxy:8888" os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" @@ -561,7 +561,7 @@ def XPCS_XSVS_SAXS_Multi( text="Add XPCS Averaged Analysis PDF Report", attachments=atch, ) - except: + except Exception: print( "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file." 
% pname @@ -609,14 +609,14 @@ def XPCS_XSVS_SAXS_Multi( mask_path="/XF11ID/analysis/2016_3/masks/", mask_name="Nov28_4M_SAXS_mask.npy", good_start=5, - #####################################for saxs + # for saxs uniformq=True, inner_radius=0.005, # 0.005 for 50 nmAu/SiO2, 0.006, #for 10nm/coralpor outer_radius=0.04, # 0.04 for 50 nmAu/SiO2, 0.05, #for 10nm/coralpor num_rings=12, gap_ring_number=6, number_rings=1, - ############################for gi_saxs + # for gi_saxs # inc_x0 = 1473, # inc_y0 = 372, # refl_x0 = 1473, diff --git a/pyCHX/v2/_futurepyCHX/__init__.py b/pyCHX/v2/_futurepyCHX/__init__.py index a266959..3c3eaba 100644 --- a/pyCHX/v2/_futurepyCHX/__init__.py +++ b/pyCHX/v2/_futurepyCHX/__init__.py @@ -2,4 +2,4 @@ # # from ._version import get_versions # __version__ = get_versions()['version'] -# del get_versions +# diff --git a/pyCHX/v2/_futurepyCHX/chx_Fitters2D.py b/pyCHX/v2/_futurepyCHX/chx_Fitters2D.py index 852502e..b99c275 100644 --- a/pyCHX/v2/_futurepyCHX/chx_Fitters2D.py +++ b/pyCHX/v2/_futurepyCHX/chx_Fitters2D.py @@ -198,11 +198,11 @@ def __call__(self, XY, img, **kwargs): self.mod = Model(self.fitfunc, independent_vars=["XY"], param_names=self.params.keys()) # assumes first var is dependent var res = self.mod.fit(img.ravel(), XY=(XY[0].ravel(), XY[1].ravel()), params=params, **kwargs) - ## old version, only return values + # old version, only return values # add reduced chisq to parameter list # res.best_values['chisq']=res.redchi # return res.best_values - ## new version, also return the std + # new version, also return the std resf = {} ks = list(res.params.keys()) for var in ks: diff --git a/pyCHX/v2/_futurepyCHX/chx_compress.py b/pyCHX/v2/_futurepyCHX/chx_compress.py index 8ac7184..1bb5970 100644 --- a/pyCHX/v2/_futurepyCHX/chx_compress.py +++ b/pyCHX/v2/_futurepyCHX/chx_compress.py @@ -48,7 +48,7 @@ def pass_FD(FD, n): # FD.rdframe(n) try: FD.seekimg(n) - except: + except Exception: pass return False @@ -244,7 +244,7 @@ def read_compressed_eigerdata( else: try: mask, avg_img, imgsum, bad_frame_list_ = pkl.load(open(filename + ".pkl", "rb")) - except: + except Exception: CAL = True if CAL: FD = Multifile(filename, beg, end) @@ -382,8 +382,8 @@ def para_compress_eigerdata( print("No bad frames are involved.") print("Combining the seperated compressed files together...") combine_compressed(filename, Nf, del_old=True) - del results - del res_ + + if with_pickle: pkl.dump([mask, avg_img, imgsum, bad_frame_list], open(filename + ".pkl", "wb")) if copy_rawdata: @@ -589,14 +589,14 @@ def segment_compress_eigerdata( fp.write(struct.pack("@{}{}".format(dlen, "ih"[nobytes == 2]), *v)) else: fp.write(struct.pack("@{}{}".format(dlen, "dd"[nobytes == 2]), *v)) # n +=1 - del p, v, img + fp.flush() fp.close() avg_img /= good_count bad_frame_list = (np.array(imgsum) > bad_pixel_threshold) | (np.array(imgsum) <= bad_pixel_low_threshold) sys.stdout.write("#") sys.stdout.flush() - # del images, mask, avg_img, imgsum, bad_frame_list + # # print( 'Should release memory here') return mask, avg_img, imgsum, bad_frame_list @@ -910,7 +910,7 @@ def __init__(self, filename, beg, end, reverse=False): NOTE: At each record n, the file cursor points to record n+1 """ self.FID = open(filename, "rb") - # self.FID.seek(0,os.SEEK_SET) + # self.FID.seek(0,os.SEEK_SET) self.filename = filename # br: bytes read br = self.FID.read(1024) @@ -1394,8 +1394,8 @@ def mean_intensityc(FD, labeled_array, sampling=1, index=None, multi_cor=False): for i in inputs: mean_intensity[:, i] = res[i] print("ROI 
mean_intensit calculation is DONE!") - del results - del res + + mean_intensity /= norm return mean_intensity, index diff --git a/pyCHX/v2/_futurepyCHX/chx_correlation.py b/pyCHX/v2/_futurepyCHX/chx_correlation.py index 2ef23d2..992cef4 100644 --- a/pyCHX/v2/_futurepyCHX/chx_correlation.py +++ b/pyCHX/v2/_futurepyCHX/chx_correlation.py @@ -1,27 +1,27 @@ -# ###################################################################### +# # # Developed at the NSLS-II, Brookhaven National Laboratory # -# # +# # # Copyright (c) 2014, Brookhaven Science Associates, Brookhaven # # National Laboratory. All rights reserved. # -# # +# # # Redistribution and use in source and binary forms, with or without # # modification, are permitted provided that the following conditions # # are met: # -# # +# # # * Redistributions of source code must retain the above copyright # -# notice, this list of conditions and the following disclaimer. # -# # +# notice, this list of conditions and the following disclaimer. # +# # # * Redistributions in binary form must reproduce the above copyright # -# notice this list of conditions and the following disclaimer in # -# the documentation and/or other materials provided with the # -# distribution. # -# # +# notice this list of conditions and the following disclaimer in # +# the documentation and/or other materials provided with the # +# distribution. # +# # # * Neither the name of the Brookhaven Science Associates, Brookhaven # -# National Laboratory nor the names of its contributors may be used # -# to endorse or promote products derived from this software without # -# specific prior written permission. # -# # +# National Laboratory nor the names of its contributors may be used # +# to endorse or promote products derived from this software without # +# specific prior written permission. # +# # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS # # "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT # # LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS # @@ -34,7 +34,7 @@ # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OTHERWISE) ARISING # # IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE # # POSSIBILITY OF SUCH DAMAGE. 
# -######################################################################## +# """ This module is for functions specific to time correlation @@ -131,7 +131,7 @@ def _one_time_process( future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] @@ -672,7 +672,7 @@ def _two_time_process( img_per_level[level] += 1 # in multi-tau correlation other than first level all other levels - # have to do the half of the correlation + # have to do the half of the correlation if level == 0: i_min = 0 else: @@ -684,7 +684,7 @@ def _two_time_process( past_img = buf[level, delay_no] future_img = buf[level, buf_no] - # get the matrix of correlation function without normalizations + # get the matrix of correlation function without normalizations tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] # get the matrix of past intensity normalizations pi_binned = np.bincount(label_array, weights=past_img)[1:] @@ -892,7 +892,7 @@ class CrossCorrelator: >> cimg = cc(img1) or, mask may m >> cc = CrossCorrelator(ids) - #(where ids is same shape as img1) + # (where ids is same shape as img1) >> cc1 = cc(img1) >> cc12 = cc(img1, img2) # if img2 shifts right of img1, point of maximum correlation is shifted diff --git a/pyCHX/v2/_futurepyCHX/chx_correlationc.py b/pyCHX/v2/_futurepyCHX/chx_correlationc.py index fb31982..b829715 100644 --- a/pyCHX/v2/_futurepyCHX/chx_correlationc.py +++ b/pyCHX/v2/_futurepyCHX/chx_correlationc.py @@ -87,7 +87,7 @@ def _one_time_process( past_img = buf[level, delay_no] future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] # take out the past_ing and future_img created using bad images @@ -178,7 +178,7 @@ def _one_time_process_error( past_img = buf[level, delay_no] future_img = buf[level, buf_no] # find the normalization that can work both for bad_images - # and good_images + # and good_images ind = int(t_index - lev_len[:level].sum()) normalize = img_per_level[level] - i - norm[level + 1][ind] # take out the past_ing and future_img created using bad images @@ -187,13 +187,13 @@ def _one_time_process_error( norm[level + 1][ind] += 1 else: # for w, arr in zip([past_img*future_img, past_img, future_img], - # [G, past_intensity_norm, future_intensity_norm, - # ]): - # binned = np.bincount(label_array, weights=w)[1:] - # #nonz = np.where(w)[0] - # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] - # arr[t_index] += ((binned / num_pixels - - # arr[t_index]) / normalize) + # [G, past_intensity_norm, future_intensity_norm, + # ]): + # binned = np.bincount(label_array, weights=w)[1:] + # #nonz = np.where(w)[0] + # #binned = np.bincount(label_array[nonz], weights=w[nonz], minlength=maxqind+1 )[1:] + # arr[t_index] += ((binned / num_pixels - + # arr[t_index]) / normalize) for w, arr in zip( [past_img * future_img, past_img, future_img], [ @@ -982,7 +982,7 @@ def lazy_two_time( norm=None, ): # def lazy_two_time(labels, images, num_frames, num_bufs, num_levels=1, - # two_time_internal_state=None): + # two_time_internal_state=None): """Generator implementation of two-time correlation If you do not want multi-tau correlation, set num_levels to 1 and num_bufs to the number of images you wish to correlate 
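To make the lazy_two_time docstring above concrete: the multi-tau lag ladder is built from num_levels and num_bufs as sketched below. This follows the standard scikit-beam scheme these correlators are modeled on (the helper name is ours); with num_levels=1 it degenerates to plain linear lags, which is the non-multi-tau case the docstring describes.

import numpy as np


def multi_tau_lag_steps(num_levels, num_bufs):
    if num_bufs % 2:
        raise ValueError("num_bufs must be even")
    lags = list(range(num_bufs))                 # level 0: spacing 1
    for level in range(1, num_levels):
        # each higher level adds num_bufs/2 lags with spacing 2**level
        lags += [(num_bufs // 2 + i) * 2 ** level for i in range(num_bufs // 2)]
    return np.array(lags)


print(multi_tau_lag_steps(3, 8))    # [ 0 .. 7 | 8 10 12 14 | 16 20 24 28 ]
print(multi_tau_lag_steps(1, 16))   # plain lags 0..15: no multi-tau binning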
@@ -1202,7 +1202,7 @@ def _two_time_process( img_per_level[level] += 1 # in multi-tau correlation other than first level all other levels - # have to do the half of the correlation + # have to do the half of the correlation if level == 0: i_min = 0 else: @@ -1216,7 +1216,7 @@ def _two_time_process( # print( np.sum( past_img ), np.sum( future_img )) - # get the matrix of correlation function without normalizations + # get the matrix of correlation function without normalizations tmp_binned = np.bincount(label_array, weights=past_img * future_img)[1:] # get the matrix of past intensity normalizations pi_binned = np.bincount(label_array, weights=past_img)[1:] @@ -1433,7 +1433,7 @@ def cal_g2c( g_max = min(g_max1, g_max2) # print(g_max) # g2_ = (s.G[:g_max] / (s.past_intensity[:g_max] * - # s.future_intensity[:g_max])) + # s.future_intensity[:g_max])) g2[:g_max, qi - 1] = avgGi[:g_max] / (avgPi[:g_max] * avgFi[:g_max]) g2_err[:g_max, qi - 1] = np.sqrt( (1 / (avgFi[:g_max] * avgPi[:g_max])) ** 2 * devGi[:g_max] ** 2 @@ -1508,9 +1508,9 @@ def __init__( if end is None: self.end = FD.end # if self.beg ==0: - # self.length = self.end - self.beg + # self.length = self.end - self.beg # else: - # self.length = self.end - self.beg + 1 + # self.length = self.end - self.beg + 1 self.length = self.end - self.beg @@ -1555,18 +1555,18 @@ def get_data(self): if self.mean_int_sets is not None: # for each frame will normalize each ROI by it's averaged value for j in range(noqs): # if i ==100: - # if j==0: - # print( self.mean_int_sets[i][j] ) - # print( qind_[ noprs[j]: noprs[j+1] ] ) + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] # if i==100: - # print( i, Mean_Int_Qind[ self.qind== 11 ]) + # print( i, Mean_Int_Qind[ self.qind== 11 ]) # print('Do norm_mean_int here') # if i ==10: - # print( norm_Mean_Int_Qind ) + # print( norm_Mean_Int_Qind ) else: norm_Mean_Int_Qind = 1.0 if self.imgsum is not None: @@ -1580,7 +1580,7 @@ def get_data(self): norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi # if i==100: - # print(norm_Mean_Int_Qind[:100]) + # print(norm_Mean_Int_Qind[:100]) data_array[n][pxlist] = v[w] / norms n += 1 @@ -1623,9 +1623,9 @@ def __init__( if end is None: self.end = FD.end # if self.beg ==0: - # self.length = self.end - self.beg + # self.length = self.end - self.beg # else: - # self.length = self.end - self.beg + 1 + # self.length = self.end - self.beg + 1 self.length = self.end - self.beg @@ -1669,18 +1669,18 @@ def get_data(self): if self.mean_int_sets is not None: # for normalization of each averaged ROI of each frame for j in range(noqs): # if i ==100: - # if j==0: - # print( self.mean_int_sets[i][j] ) - # print( qind_[ noprs[j]: noprs[j+1] ] ) + # if j==0: + # print( self.mean_int_sets[i][j] ) + # print( qind_[ noprs[j]: noprs[j+1] ] ) Mean_Int_Qind[qind_[noprs[j] : noprs[j + 1]]] = self.mean_int_sets[i][j] norm_Mean_Int_Qind = Mean_Int_Qind[pxlist] # self.mean_int_set or Mean_Int_Qind[pxlist] # if i==100: - # print( i, Mean_Int_Qind[ self.qind== 11 ]) + # print( i, Mean_Int_Qind[ self.qind== 11 ]) # print('Do norm_mean_int here') # if i ==10: - # print( norm_Mean_Int_Qind ) + # print( norm_Mean_Int_Qind ) else: norm_Mean_Int_Qind = 1.0 if self.imgsum is not None: @@ -1699,7 +1699,7 @@ def get_data(self): norms = norm_Mean_Int_Qind * norm_imgsum * norm_avgimg_roi # if i==100: - 
# print(norm_Mean_Int_Qind[:100]) + # print(norm_Mean_Int_Qind[:100]) data_array[n][pxlist] = v[w] / norms n += 1 @@ -1747,7 +1747,7 @@ def auto_two_Arrayc(data_pixel, rois, index=None): try: g12b = np.zeros([noframes, noframes, len(qlist)]) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1814,7 +1814,7 @@ def auto_two_Arrayc_ExplicitNorm(data_pixel, rois, norm=None, index=None): try: g12b = np.zeros([noframes, noframes, len(qlist)]) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1879,7 +1879,7 @@ def two_time_norm(data_pixel, rois, index=None): try: norm = np.zeros(len(qlist)) DO = True - except: + except Exception: print( "The array is too large. The Sever can't handle such big array. Will calulate different Q sequencely" ) @@ -1927,7 +1927,7 @@ def check_normalization(frame_num, q_list, imgsa, data_pixel): ) # plot1D( raw_data/mean_int_sets_[frame_num][q-1], ax=ax[1], legend='q=%s'%(q), m=markers[n], - # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) + # xlabel='pixel',title='fra=%s_norm_data'%(frame_num)) # print( mean_int_sets_[frame_num][q-1] ) plot1D( norm_data, diff --git a/pyCHX/v2/_futurepyCHX/chx_correlationp.py b/pyCHX/v2/_futurepyCHX/chx_correlationp.py index 646e750..f6b421d 100644 --- a/pyCHX/v2/_futurepyCHX/chx_correlationp.py +++ b/pyCHX/v2/_futurepyCHX/chx_correlationp.py @@ -370,8 +370,8 @@ def cal_c12p( lag_steps = res[0][1] print("G2 calculation DONE!") - del results - del res + + return c12, lag_steps[lag_steps < noframes] @@ -627,7 +627,7 @@ def lazy_one_timep( g_max = min(g_max1, g_max2) g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) # sys.stdout.write('#') - # del FD + # # sys.stdout.flush() # print (g2) # return results(g2, s.lag_steps[:g_max], s) @@ -743,9 +743,9 @@ def cal_g2p( res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] len_lag = 10**10 for i in inputs: # to get the smallest length of lag_step, - ##***************************** - ##Here could result in problem for significantly cut useful data if some Q have very short tau list - ##**************************** + # ***************************** + # Here could result in problem for significantly cut useful data if some Q have very short tau list + # **************************** if len_lag > len(res[i][1]): lag_steps = res[i][1] len_lag = len(lag_steps) @@ -797,8 +797,8 @@ def cal_g2p( if len(lag_steps_err) < len(lag_stepsi): lag_steps_err = lag_stepsi - del results - del res + + if cal_error: print("G2 with error bar calculation DONE!") return g2[:Gmax, :], lag_steps_err[:Gmax], g2_err[:Gmax, :] / np.sqrt(nopr) @@ -908,8 +908,8 @@ def cal_GPF( g2_G[:, qind == 1 + i] = res[i][2] # [:len_lag] g2_P[:, qind == 1 + i] = res[i][3] # [:len_lag] g2_F[:, qind == 1 + i] = res[i][4] # [:len_lag] - del results - del res + + return g2_G, g2_P, g2_F @@ -931,12 +931,12 @@ def get_g2_from_ROI_GPF(G, P, F, roi_mask): g2 = np.zeros([G.shape[0], noqs]) g2_err = np.zeros([G.shape[0], noqs]) for i in range(1, 1 + noqs): - ## G[0].shape is the same as roi_mask shape + # G[0].shape is the same as roi_mask shape if len(G.shape) > 2: s_Gall_qi = G[:, roi_mask == i] s_Pall_qi = P[:, roi_mask == i] s_Fall_qi = F[:, roi_mask == i] - ## G[0].shape is the same length as pixelist + # G[0].shape is the same length as pixelist else: s_Gall_qi = G[:, qind == i] s_Pall_qi = P[:, 
qind == i] @@ -1019,7 +1019,7 @@ def auto_two_Arrayp(data_pixel, rois, index=None): # pool = Pool(processes= len(inputs) ) # results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i], - # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] + # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] # res = [r.get() for r in results] pool = Pool(processes=len(inputs)) diff --git a/pyCHX/v2/_futurepyCHX/chx_correlationp2.py b/pyCHX/v2/_futurepyCHX/chx_correlationp2.py index 9abe33f..e770dbd 100644 --- a/pyCHX/v2/_futurepyCHX/chx_correlationp2.py +++ b/pyCHX/v2/_futurepyCHX/chx_correlationp2.py @@ -354,8 +354,8 @@ def cal_c12p( lag_steps = res[0][1] print("G2 calculation DONE!") - del results - del res + + return c12, lag_steps[lag_steps < noframes] @@ -603,7 +603,7 @@ def lazy_one_timep( g_max = min(g_max1, g_max2) g2 = s.G[:g_max] / (s.past_intensity[:g_max] * s.future_intensity[:g_max]) # sys.stdout.write('#') - # del FD + # # sys.stdout.flush() # print (g2) # return results(g2, s.lag_steps[:g_max], s) @@ -708,9 +708,9 @@ def cal_g2p( res = [results[k].get() for k in tqdm(list(sorted(results.keys())))] len_lag = 10**10 for i in inputs: # to get the smallest length of lag_step, - ##***************************** - ##Here could result in problem for significantly cut useful data if some Q have very short tau list - ##**************************** + # ***************************** + # Here could result in problem for significantly cut useful data if some Q have very short tau list + # **************************** if len_lag > len(res[i][1]): lag_steps = res[i][1] len_lag = len(lag_steps) @@ -769,8 +769,8 @@ def cal_g2p( g2_P[:, nopr_[i] : nopr_[i + 1]] = s_Pall_qi g2_F[:, nopr_[i] : nopr_[i + 1]] = s_Fall_qi - del results - del res + + if cal_error: print("G2 with error bar calculation DONE!") return ( @@ -836,7 +836,7 @@ def auto_two_Arrayp(data_pixel, rois, index=None): # pool = Pool(processes= len(inputs) ) # results = [ apply_async( pool, _get_two_time_for_one_q, ( qlist[i], - # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] + # data_pixel_qis[i], nopr, noframes ) ) for i in tqdm( inputs ) ] # res = [r.get() for r in results] pool = Pool(processes=len(inputs)) diff --git a/pyCHX/v2/_futurepyCHX/chx_crosscor.py b/pyCHX/v2/_futurepyCHX/chx_crosscor.py index 28e839b..01d000c 100644 --- a/pyCHX/v2/_futurepyCHX/chx_crosscor.py +++ b/pyCHX/v2/_futurepyCHX/chx_crosscor.py @@ -1,8 +1,8 @@ # Develop new version # Original from #/XF11ID/analysis/Analysis_Pipelines/Develop/chxanalys/chxanalys/chx_correlation.py -# ###################################################################### +# # # Let's change from mask's to indices -######################################################################## +# """ This module is for functions specific to spatial correlation in order to tackle the motion of speckles @@ -62,7 +62,7 @@ def direct_corss_cor(im1, im2): elif j < 0: d1 = im1[:j, :] d2 = im2[-j:, :] - else: ##j>0 + else: #j>0 d1 = im1[j:, :] d2 = im2[:-j, :] elif i < 0: @@ -72,7 +72,7 @@ def direct_corss_cor(im1, im2): elif j < 0: d1 = im1[:j, :i] d2 = im2[-j:, -i:] - else: ##j>0 + else: #j>0 d1 = im1[j:, :i] d2 = im2[:-j, -i:] else: # i>0: @@ -82,7 +82,7 @@ def direct_corss_cor(im1, im2): elif j < 0: d1 = im1[:j, i:] d2 = im2[-j:, :-i] - else: ##j>0 + else: #j>0 d1 = im1[j:, i:] d2 = im2[:-j, :-i] # print(i,j) @@ -106,7 +106,7 @@ class CrossCorrelator2: >> cimg = cc(img1) or, mask may may be ids >> cc = CrossCorrelator(ids) - #(where ids is same shape as 
img1) + # (where ids is same shape as img1) >> cc1 = cc(img1) >> cc12 = cc(img1, img2) # if img2 shifts right of img1, point of maximum correlation is shifted @@ -268,15 +268,15 @@ def __call__(self, img1, img2=None, normalization=None, check_res=False): ccorr = _centered(ccorr, self.sizes[reg, :]) # print('here') - ###check here + # check here if check_res: if reg == 0: self.norm = maskcor self.ck = ccorr.copy() - # print(ccorr.max()) + # print(ccorr.max()) self.tmp = tmpimg self.fs = fshape - ###end the check + # end the check # now handle the normalizations if "symavg" in normalization: @@ -312,10 +312,10 @@ def __call__(self, img1, img2=None, normalization=None, check_res=False): if check_res: if reg == 0: self.ckn = ccorr.copy() - # print('here') - # print( np.average(tmpimg[w]) ) - # print( maskcor[w] ) - # print( ccorr.max(), maskcor[w], np.average(tmpimg[w]), np.average(tmpimg2[w]) ) + # print('here') + # print( np.average(tmpimg[w]) ) + # print( maskcor[w] ) + # print( ccorr.max(), maskcor[w], np.average(tmpimg[w]), np.average(tmpimg2[w]) ) ccorrs.append(ccorr) if len(ccorrs) == 1: @@ -333,9 +333,9 @@ def _centered(img, sz): return img -##define a custmoized fftconvolve +# define a custmoized fftconvolve -######################################################################################## +# # modifided version from signaltools.py in scipy (Mark March 2017) # Author: Travis Oliphant # 1999 -- 2002 @@ -576,7 +576,7 @@ class CrossCorrelator1: >> cimg = cc(img1) or, mask may may be ids >> cc = CrossCorrelator(ids) - #(where ids is same shape as img1) + # (where ids is same shape as img1) >> cc1 = cc(img1) >> cc12 = cc(img1, img2) # if img2 shifts right of img1, point of maximum correlation is shifted @@ -683,7 +683,7 @@ def __init__(self, shape, mask=None, normalization=None): maskcorr = _cross_corr1(submask) # quick fix for #if self.wrap is False: - # submask = _expand_image1(submask)finite numbers should be integer so + # submask = _expand_image1(submask)finite numbers should be integer so # choose some small value to threshold maskcorr *= maskcorr > 0.5 self.maskcorrs.append(maskcorr) @@ -781,7 +781,7 @@ def __call__(self, img1, img2=None, normalization=None, desc="cc"): return ccorrs -##for parallel +# for parallel from multiprocessing import Pool import dill @@ -837,7 +837,7 @@ def run_para_ccorr_sym(ccorr_sym, FD, nstart=0, nend=None, imgsum=None, img_norm for i in range(Nc): cc[i] = cc[i] / N - del results - del res + + return cc diff --git a/pyCHX/v2/_futurepyCHX/chx_generic_functions.py b/pyCHX/v2/_futurepyCHX/chx_generic_functions.py index 0e3c577..2fb2f7f 100644 --- a/pyCHX/v2/_futurepyCHX/chx_generic_functions.py +++ b/pyCHX/v2/_futurepyCHX/chx_generic_functions.py @@ -17,7 +17,7 @@ # from tqdm import * from pyCHX.chx_libs import * -from pyCHX.chx_libs import colors, markers +# from pyCHX.chx_libs import colors, markers markers = [ "o", @@ -47,7 +47,8 @@ markers = np.array(markers * 100) -flatten_nestlist = lambda l: [item for sublist in l for item in sublist] +def flatten_nestlist(l): + return [item for sublist in l for item in sublist] """a function to flatten a nest list e.g., flatten( [ ['sg','tt'],'ll' ] ) gives ['sg', 'tt', 'l', 'l'] @@ -858,7 +859,7 @@ def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 tifs = list(db[uid].data("OAV_image"))[0] try: pixel_scalebar = np.ceil(scalebar_size / md["OAV resolution um_pixel"]) - except: + except Exception: pixel_scalebar = None print("No OAVS resolution is available.") @@ -882,7 +883,7 @@ 
def save_oavs_tifs(uid, data_dir, brightness_scale=1, scalebar_size=100, scale=1 img = oavs[m] try: ind = np.flipud(img * scale)[:, :, 2] < threshold - except: + except Exception: ind = np.flipud(img * scale) < threshold rgb_cont_img = np.copy(np.flipud(img)) # rgb_cont_img[ind,0]=1000 @@ -1212,7 +1213,7 @@ def ps(y, shift=0.5, replot=True, logplot="off", x=None): PEAK_y = np.max(y) COM = np.sum(x * y) / np.sum(y) - ### from Maksim: assume this is a peak profile: + # from Maksim: assume this is a peak profile: def is_positive(num): return True if num > 0 else False @@ -1232,18 +1233,18 @@ def is_positive(num): ps.cen = CEN yf = ym # return { - # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), - # 'x_range': list_of_roots, + # 'fwhm': abs(list_of_roots[-1] - list_of_roots[0]), + # 'x_range': list_of_roots, # } else: # ok, maybe it's a step function.. # print('no peak...trying step function...') ym = ym + shift - def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang + def err_func(x, x0, k=2, A=1, base=0): # erf fit from Yugang return base - A * erf(k * (x - x0)) mod = Model(err_func) - ### estimate starting values: + # estimate starting values: x0 = np.mean(x) # k=0.1*(np.max(x)-np.min(x)) pars = mod.make_params(x0=x0, k=2, A=1.0, base=0.0) @@ -1261,7 +1262,7 @@ def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang ps.fwhm = FWHM if replot: - ### re-plot results: + # re-plot results: if logplot == "on": fig, ax = plt.subplots() # plt.figure() ax.semilogy([PEAK, PEAK], [np.min(y), np.max(y)], "k--", label="PEAK") @@ -1289,7 +1290,7 @@ def err_func(x, x0, k=2, A=1, base=0): #### erf fit from Yugang # plt.title('uid: '+str(uid)+' @ '+str(t)+'\nPEAK: '+str(PEAK_y)[:8]+' @ '+str(PEAK)[:8]+' COM @ '+str(COM)[:8]+ '\n FWHM: '+str(FWHM)[:8]+' @ CEN: '+str(CEN)[:8],size=9) # plt.show() - ### assign values of interest as function attributes: + # assign values of interest as function attributes: ps.peak = PEAK ps.com = COM return ps.cen @@ -1480,7 +1481,7 @@ def average_array_withNan(array, axis=0, mask=None): array_ = np.ma.masked_array(array, mask=mask) try: sums = np.array(np.ma.sum(array_[:, :], axis=axis)) - except: + except Exception: sums = np.array(np.ma.sum(array_[:], axis=axis)) cts = np.sum(~mask, axis=axis) @@ -1863,7 +1864,7 @@ def linear_fit(x, y, xrange=None): def find_index(x, x0, tolerance=None): """YG Octo 16,2017 copied from SAXS find index of x0 in x - #find the position of P in a list (plist) with tolerance + # find the position of P in a list (plist) with tolerance """ N = len(x) @@ -1880,13 +1881,13 @@ def find_index(x, x0, tolerance=None): def find_index_old(x, x0, tolerance=None): """YG Octo 16,2017 copied from SAXS find index of x0 in x - #find the position of P in a list (plist) with tolerance + # find the position of P in a list (plist) with tolerance """ N = len(x) i = 0 position = None - if tolerance == None: + if tolerance is None: tolerance = (x[1] - x[0]) / 2.0 if x0 > max(x): position = len(x) - 1 @@ -1991,7 +1992,7 @@ def sgolay2d(z, window_size, order, derivative=None): Z[-half_size:, :half_size] = band - np.abs(np.fliplr(Z[-half_size:, half_size + 1 : 2 * half_size + 1]) - band) # solve system and convolve - if derivative == None: + if derivative is None: m = np.linalg.pinv(A)[0].reshape((window_size, -1)) return scipy.signal.fftconvolve(Z, m, mode="valid") elif derivative == "col": @@ -2039,7 +2040,7 @@ def extract_data_from_file( Or giving start_row: int good_cols: list of integer, good index of cols lables: the label of the good_cols - 
#save: False, if True will save the data into a csv file with filename appending csv ?? + # save: False, if True will save the data into a csv file with filename appending csv ?? Return: a pds.dataframe Example: @@ -2077,7 +2078,7 @@ def extract_data_from_file( else: temp = np.array([els[j] for j in good_cols], dtype=float) data = np.vstack((data, temp)) - except: + except Exception: pass if labels is None: labels = np.arange(data.shape[1]) @@ -2107,9 +2108,9 @@ def get_print_uids(start_time, stop_time, return_all_info=False): date = time.ctime(hdrs[-i - 1]["start"]["time"]) try: m = hdrs[-i - 1]["start"]["Measurement"] - except: + except Exception: m = "" - info = "%3d: uid = '%s' ##%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) + info = "%3d: uid = '%s' #%s #%s: %s-- %s " % (i, uid, date, sid, m, fuid) print(info) if return_all_info: all_info[n] = info @@ -2251,7 +2252,7 @@ def validate_uid(uid): imgs = load_data(uid, md["detector"], reverse=True) print(imgs) return 1 - except: + except Exception: print("Can't load this uid=%s!" % uid) return 0 @@ -2399,7 +2400,7 @@ def filter_roi_mask(filter_dict, roi_mask, avg_img, filter_type="ylim"): return rm -## +# # Dev at March 31 for create Eiger chip mask def create_chip_edges_mask(det="1M"): """Create a chip edge mask for Eiger detector""" @@ -2451,7 +2452,7 @@ def create_folder(base_folder, sub_folder): """ data_dir0 = os.path.join(base_folder, sub_folder) - ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + # Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) print("Results from this analysis will be stashed in the directory %s" % data_dir0) return data_dir0 @@ -2472,15 +2473,15 @@ def create_user_folder(CYCLE, username=None, default_dir="/XF11ID/analysis/"): data_dir0 = os.path.join(default_dir, CYCLE, username, "Results/") else: data_dir0 = os.path.join(default_dir, CYCLE + "/") - ##Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' + # Or define data_dir here, e.g.,#data_dir = '/XF11ID/analysis/2016_2/rheadric/test/' os.makedirs(data_dir0, exist_ok=True) print("Results from this analysis will be stashed in the directory %s" % data_dir0) return data_dir0 -################################## -#########For dose analysis ####### -################################## +# +# For dose analysis # +# def get_fra_num_by_dose(exp_dose, exp_time, att=1, dead_time=2): """ Calculate the frame number to be correlated by giving a X-ray exposure dose @@ -2583,14 +2584,14 @@ def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * dpix = md["x_pixel_size"] * 1000.0 # in mm, eiger 4m is 0.075 mm try: lambda_ = md["wavelength"] - except: + except Exception: lambda_ = md["incident_wavelength"] # wavelegth of the X-rays in Angstroms try: Ldet = md["det_distance"] if Ldet <= 1000: Ldet *= 1000 md["det_distance"] = Ldet - except: + except Exception: Ldet = md["detector_distance"] if Ldet <= 1000: Ldet *= 1000 @@ -2598,14 +2599,14 @@ def check_lost_metadata(md, Nimg=None, inc_x0=None, inc_y0=None, pixelsize=7.5 * try: # try exp time from detector exposuretime = md["count_time"] # exposure time in sec - except: + except Exception: exposuretime = md["cam_acquire_time"] # exposure time in sec try: # try acq time from detector acquisition_period = md["frame_time"] - except: + except Exception: try: acquisition_period = md["acquire period"] - except: + except Exception: uid = md["uid"] acquisition_period = 
float(db[uid]["start"]["acquire period"]) timeperframe = acquisition_period @@ -2806,7 +2807,7 @@ def find_uids(start_time, stop_time): hdrs = db(start_time=start_time, stop_time=stop_time) try: print("Totally %s uids are found." % (len(list(hdrs)))) - except: + except Exception: pass sids = [] uids = [] @@ -3078,7 +3079,7 @@ def print_dict(dicts, keys=None): for k in keys: try: print("%s--> %s" % (k, dicts[k])) - except: + except Exception: pass @@ -3120,7 +3121,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): md["suid"] = uid # short uid try: md["filename"] = get_sid_filenames(header)[2][0] - except: + except Exception: md["filename"] = "N.A." devices = sorted(list(header.devices())) @@ -3140,7 +3141,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): # detector_names = sorted( header.start['detectors'] ) detector_names = sorted(get_detectors(db[uid])) # if len(detector_names) > 1: - # raise ValueError("More than one det. This would have unintented consequences.") + # raise ValueError("More than one det. This would have unintented consequences.") detector_name = detector_names[0] # md['detector'] = detector_name md["detector"] = get_detector(header) @@ -3151,12 +3152,12 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): md[newkey] = val # for k,v in ev['descriptor']['configuration'][dec]['data'].items(): - # md[ k[len(dec)+1:] ]= v + # md[ k[len(dec)+1:] ]= v try: md.update(header.start["plan_args"].items()) md.pop("plan_args") - except: + except Exception: pass md.update(header.start.items()) @@ -3165,7 +3166,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): md["stop_time"] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(header.stop["time"])) try: # added: try to handle runs that don't contain image data md["img_shape"] = header["descriptors"][0]["data_keys"][md["detector"]]["shape"][:2][::-1] - except: + except Exception: if verbose: print("couldn't find image shape...skip!") else: @@ -3174,7 +3175,7 @@ def get_meta_data(uid, default_dec="eiger", *argv, **kwargs): # for k, v in sorted(md.items()): # ... 
- # print(f'{k}: {v}') + # print(f'{k}: {v}') return md @@ -3223,7 +3224,7 @@ def get_max_countc(FD, labeled_array): (p, v) = FD.rdrawframe(i) w = np.where(timg[p])[0] max_inten = max(max_inten, np.max(v[w])) - except: + except Exception: pass return max_inten @@ -3375,7 +3376,7 @@ def create_cross_mask( imy, imx = image.shape cx, cy = center bst_mask = np.zeros_like(image, dtype=bool) - ### + # # for right part wy = wy_right x = np.array([cx, imx, imx, cx]) @@ -3383,7 +3384,7 @@ def create_cross_mask( rr, cc = polygon(y, x) bst_mask[rr, cc] = 1 - ### + # # for left part wy = wy_left x = np.array([0, cx, cx, 0]) @@ -3391,7 +3392,7 @@ def create_cross_mask( rr, cc = polygon(y, x) bst_mask[rr, cc] = 1 - ### + # # for up part wx = wx_up x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) @@ -3399,7 +3400,7 @@ def create_cross_mask( rr, cc = polygon(y, x) bst_mask[rr, cc] = 1 - ### + # # for low part wx = wx_down x = np.array([cx - wx, cx + wx, cx + wx, cx - wx]) @@ -3462,7 +3463,7 @@ def export_scan_scalar( return datap -##### +# # load data by databroker @@ -3646,7 +3647,7 @@ def load_data2(uid, detector="eiger4m_single_image"): try: (ev,) = hdr.events(fields=[detector]) flag = 0 - except: + except Exception: flag += 1 print("Trying again ...!") @@ -3791,8 +3792,8 @@ def RemoveHot(img, threshold=1e7, plot_=True): return mask -############ -###plot data +# +# plot data def show_img( @@ -3995,34 +3996,34 @@ def plot1D( legend = " " try: logx = kwargs["logx"] - except: + except Exception: logx = False try: logy = kwargs["logy"] - except: + except Exception: logy = False try: logxy = kwargs["logxy"] - except: + except Exception: logxy = False - if logx == True and logy == True: + if logx and logy: logxy = True try: marker = kwargs["marker"] - except: + except Exception: try: marker = kwargs["m"] - except: + except Exception: marker = next(markers_) try: color = kwargs["color"] - except: + except Exception: try: color = kwargs["c"] - except: + except Exception: color = next(colors_) if x is None: @@ -4076,7 +4077,7 @@ def plot1D( title = "plot" ax.set_title(title) # ax.set_xlabel("$Log(q)$"r'($\AA^{-1}$)') - if (legend != "") and (legend != None): + if (legend != "") and (legend is not None): ax.legend(loc="best", fontsize=legend_size) if "save" in kwargs.keys(): if kwargs["save"]: @@ -4089,7 +4090,7 @@ def plot1D( return fig -### +# def check_shutter_open(data_series, min_inten=0, time_edge=[0, 10], plot_=False, *argv, **kwargs): @@ -5165,7 +5166,7 @@ def save_g2_general(g2, taus, qr=None, qz=None, uid="uid", path=None, return_res return df -########### +# # *for g2 fit and plot @@ -5203,7 +5204,7 @@ def flow_para_function(x, beta, relaxation_rate, flow_velocity, baseline=1): def flow_para_function_explicitq(x, beta, diffusion, flow_velocity, alpha=1, baseline=1, qr=1, q_ang=0): """Nov 9, 2017 Basically, make q vector to (qr, angle), - ###relaxation_rate is actually a diffusion rate + # relaxation_rate is actually a diffusion rate flow_velocity: q.v (q vector dot v vector = q*v*cos(angle) ) Diffusion part: np.exp( -2*D q^2 *tau ) q_ang: would be np.radians( ang - 90 ) @@ -5524,7 +5525,7 @@ def get_g2_fit_general( try: if isinstance(_guess_val[k], (np.ndarray, list)): pars[k].value = _guess_val[k][i] - except: + except Exception: pass if True: @@ -5563,7 +5564,7 @@ def get_g2_fit_general( pars["%s" % v].vary = False # if i==20: - # print(pars) + # print(pars) # print( pars ) result1 = mod.fit(y, pars, x=lags) # print(qval_dict[i][0], qval_dict[i][1], y) @@ -5665,9 +5666,9 @@ def 
get_short_long_labels_from_qval_dict(qval_dict, geometry="saxs"): ) -############################################ -##a good func to plot g2 for all types of geogmetries -############################################ +# +# a good func to plot g2 for all types of geogmetries +# def plot_g2_general( @@ -5759,7 +5760,7 @@ def plot_g2_general( for k in list(g2_dict.keys()): g2_dict_[k] = g2_dict[k][:, [i for i in qth_interest]] # for k in list(taus_dict.keys()): - # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] + # taus_dict_[k] = taus_dict[k][:,[i for i in qth_interest]] taus_dict_ = taus_dict qval_dict_ = {k: qval_dict[k] for k in qth_interest} if fit_res is not None: @@ -5797,8 +5798,8 @@ def plot_g2_general( ind_long_i = ind_long[s_ind] num_long_i = len(ind_long_i) # if show_average_ang_saxs: - # if geometry=='ang_saxs': - # num_long_i += 1 + # if geometry=='ang_saxs': + # num_long_i += 1 if RUN_GUI: fig = Figure(figsize=(10, 12)) else: @@ -5869,8 +5870,8 @@ def plot_g2_general( for i, l_ind in enumerate(ind_long_i): if num_long_i <= max_plotnum_fig: # if s_ind ==2: - # print('Here') - # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) + # print('Here') + # print(i, l_ind, short_label[s_ind], long_label[l_ind], sx, sy, i+1 ) ax = fig.add_subplot(sx, sy, i + 1) if sx == 1: if sy == 1: @@ -5936,7 +5937,7 @@ def plot_g2_general( dumy = g2_dict_[k].shape # print( 'here is the shape' ) islist = False - except: + except Exception: islist_n = len(g2_dict_[k]) islist = True # print( 'here is the list' ) @@ -6121,7 +6122,7 @@ def plot_g2_general( vmin, vmax = kwargs["vlim"] try: ax.set_ylim([ymin * vmin, ymax * vmax]) - except: + except Exception: pass else: pass @@ -6142,7 +6143,7 @@ def plot_g2_general( # print(fig) try: plt.savefig(fp + ".png", dpi=fig.dpi) - except: + except Exception: print("Can not save figure here.") else: diff --git a/pyCHX/v2/_futurepyCHX/chx_handlers.py b/pyCHX/v2/_futurepyCHX/chx_handlers.py index 998ce9c..426c679 100644 --- a/pyCHX/v2/_futurepyCHX/chx_handlers.py +++ b/pyCHX/v2/_futurepyCHX/chx_handlers.py @@ -1,5 +1,5 @@ -###Copied from chxtools/chxtools/handlers.py -###https://github.com/NSLS-II-CHX/chxtools/blob/master/chxtools/handlers.py +# Copied from chxtools/chxtools/handlers.py +# https://github.com/NSLS-II-CHX/chxtools/blob/master/chxtools/handlers.py # handler registration and database instantiation should be done diff --git a/pyCHX/v2/_futurepyCHX/chx_libs.py b/pyCHX/v2/_futurepyCHX/chx_libs.py index aa18797..c05aa55 100644 --- a/pyCHX/v2/_futurepyCHX/chx_libs.py +++ b/pyCHX/v2/_futurepyCHX/chx_libs.py @@ -3,7 +3,7 @@ yuzhang@bnl.gov This module is for the necessary packages for the XPCS analysis """ -## Import all the required packages for Data Analysis +# Import all the required packages for Data Analysis from databroker import Broker from databroker.assets.path_only_handlers import RawHandler @@ -40,9 +40,9 @@ import skbeam.core.utils as utils # * scikit-beam - data analysis tools for X-ray science -# - https://github.com/scikit-beam/scikit-beam +# - https://github.com/scikit-beam/scikit-beam # * xray-vision - plotting helper functions for X-ray science -# - https://github.com/Nikea/xray-vision +# - https://github.com/Nikea/xray-vision import xray_vision import xray_vision.mpl_plotting as mpl_plot from lmfit import Model, Parameter, Parameters, minimize, report_fit @@ -370,7 +370,7 @@ # colors_ = itertools.cycle(sorted_colors_ ) markers_ = itertools.cycle(markers) # Custom colormaps 
-################################################################################ +# # ROYGBVR but with Cyan-Blue instead of Blue color_list_cyclic_spectrum = [ [1.0, 0.0, 0.0], diff --git a/pyCHX/v2/_futurepyCHX/chx_speckle.py b/pyCHX/v2/_futurepyCHX/chx_speckle.py index 75ab068..8075a2e 100644 --- a/pyCHX/v2/_futurepyCHX/chx_speckle.py +++ b/pyCHX/v2/_futurepyCHX/chx_speckle.py @@ -151,7 +151,7 @@ def xsvs( try: noframes = len(images) - except: + except Exception: noframes = images.length # Num= { key: [0]* len( dict_dly[key] ) for key in list(dict_dly.keys()) } @@ -413,9 +413,9 @@ def get_bin_edges(num_times, num_rois, mean_roi, max_cts): return bin_edges, bin_centers, norm_bin_edges, norm_bin_centers -################# -##for fit -################### +# +# for fit +# from scipy import stats from scipy.special import gamma, gammaln @@ -505,7 +505,7 @@ def nbinom_dist(bin_values, K, M): return nbinom -#########poisson +# poisson def poisson(x, K): """Poisson distribution function. K is average photon counts @@ -741,7 +741,7 @@ def fit_xsvs1( axes.set_xlabel("K/") axes.set_ylabel("P(K)") - # Using the best K and M values interpolate and get more values for fitting curve + # Using the best K and M values interpolate and get more values for fitting curve fitx_ = np.linspace(0, max(Knorm_bin_edges[j, i][:-1]), 1000) fitx = np.linspace(0, max(bin_edges[j, i][:-1]), 1000) if func == "bn": @@ -883,7 +883,7 @@ def plot_xsvs_g2(g2, taus, res_pargs=None, *argv, **kwargs): # plt.show() -###########################3 +# 3 # @@ -999,7 +999,7 @@ def get_xsvs_fit( full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -1014,7 +1014,7 @@ def get_xsvs_fit( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) diff --git a/pyCHX/v2/_futurepyCHX/chx_specklecp.py b/pyCHX/v2/_futurepyCHX/chx_specklecp.py index a4e5029..b57658c 100644 --- a/pyCHX/v2/_futurepyCHX/chx_specklecp.py +++ b/pyCHX/v2/_futurepyCHX/chx_specklecp.py @@ -242,8 +242,8 @@ def xsvsp_single( ) print("Histogram calculation DONE!") - del results - del res + + return bin_edges, prob_k, prob_k_std_dev, his_sum @@ -560,7 +560,7 @@ def xsvsc_single( processing = 0 # print( level ) # prob_k_std_dev = np.power((prob_k_pow - - # np.power(prob_k, 2)), .5) + # np.power(prob_k, 2)), .5) for i in range(num_times): for j in range(num_roi): @@ -571,8 +571,8 @@ def xsvsc_single( prob_k[i, j] = prob_k[i, j] / his_sum[i, j] # for i in range(num_times): - # if isinstance(prob_k[i,0], float ) or isinstance(prob_k[i,0], int ): - # pass + # if isinstance(prob_k[i,0], float ) or isinstance(prob_k[i,0], int ): + # pass return bin_edges, prob_k, prob_k_std_dev, his_sum @@ -624,9 +624,9 @@ def _process( track_bad_level[level] += 1 # print (img_per_level,track_bad_level) u_labels = list(np.unique(labels)) - ############## - ##To Do list here, change histogram to bincount - ##Change error bar calculation + # + # To Do list here, change histogram to bincount + # Change error bar calculation if not (np.isnan(data).any()): for j, label in enumerate(u_labels): roi_data = data[labels == label] @@ -637,12 +637,12 @@ def _process( spe_hist = np.nan_to_num(spe_hist) # print( spe_hist.shape ) # prob_k[level, j] += (spe_hist - - # prob_k[level, j])/( 
img_per_level[level] - track_bad_level[level] ) + # prob_k[level, j])/( img_per_level[level] - track_bad_level[level] ) # print( prob_k[level, j] ) prob_k[level, j] += spe_hist # print( spe_hist.shape, prob_k[level, j] ) # prob_k_pow[level, j] += (np.power(spe_hist, 2) - - # prob_k_pow[level, j])/(img_per_level[level] - track_bad_level[level]) + # prob_k_pow[level, j])/(img_per_level[level] - track_bad_level[level]) def normalize_bin_edges(num_times, num_rois, mean_roi, max_cts): @@ -886,15 +886,15 @@ def get_bin_edges(num_times, num_rois, mean_roi, max_cts): return bin_edges, bin_centers, norm_bin_edges, norm_bin_centers -################# -##for fit -################### +# +# for fit +# from scipy import stats from scipy.special import gamma, gammaln -###########################3 -##Dev at Nov 18, 2016 +# 3 +# Dev at Nov 18, 2016 # @@ -944,8 +944,8 @@ def nbinomres(p, hist, x, hist_err=None, N=1): return err -########### -##Dev at Octo 12, 2017 +# +# Dev at Octo 12, 2017 def nbinom(p, x, mu): @@ -1097,7 +1097,7 @@ def get_xsvs_fit( full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(kmean_guess) # resultL[0][0] ) + KL_val[i].append(kmean_guess) # resultL[0][0] ) else: # vary M and K fit_func = nbinomlog @@ -1112,7 +1112,7 @@ def get_xsvs_fit( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) @@ -1145,9 +1145,9 @@ def plot_xsvs_fit( """ # if qth is None: - # fig = plt.figure(figsize=(10,12)) + # fig = plt.figure(figsize=(10,12)) # else: - # fig = plt.figure(figsize=(8,8)) + # fig = plt.figure(figsize=(8,8)) max_cts = spe_cts_all[0][0].shape[0] - 1 num_times, num_rings = spe_cts_all.shape @@ -1537,7 +1537,7 @@ def get_xsvs_fit_old( full_output=1, ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -1552,13 +1552,13 @@ def get_xsvs_fit_old( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) # if max_bins==2: - # ML_val = np.array( [ML_val[k][0] for k in sorted(list(ML_val.keys()))] ) - # KL_val = np.array( [KL_val[k][0] for k in sorted(list(KL_val.keys()))] ) + # ML_val = np.array( [ML_val[k][0] for k in sorted(list(ML_val.keys()))] ) + # KL_val = np.array( [KL_val[k][0] for k in sorted(list(KL_val.keys()))] ) return ML_val, KL_val, np.array(K_) @@ -1647,7 +1647,7 @@ def nbinom_dist(bin_values, K, M): return nbinom -#########poisson +# poisson def poisson(x, K): """Poisson distribution function. 
K is average photon counts @@ -1874,7 +1874,7 @@ def fit_xsvs1( axes.set_xlabel("K/") axes.set_ylabel("P(K)") - # Using the best K and M values interpolate and get more values for fitting curve + # Using the best K and M values interpolate and get more values for fitting curve fitx_ = np.linspace(0, max(Knorm_bin_edges[j, i][:-1]), 1000) fitx = np.linspace(0, max(bin_edges[j, i][:-1]), 1000) if func == "bn": @@ -2094,7 +2094,7 @@ def get_xsvs_fit_old1( ) ML_val[i].append(abs(resultL[0][0])) - KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) + KL_val[i].append(K_mean[i] * 2**j) # resultL[0][0] ) else: # vary M and K @@ -2113,7 +2113,7 @@ def get_xsvs_fit_old1( ) ML_val[i].append(abs(resultL[0][1])) - KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) + KL_val[i].append(abs(resultL[0][0])) # resultL[0][0] ) # print( j, m0, resultL[0][1], resultL[0][0], K_mean[i] * 2**j ) if j == 0: K_.append(KL_val[i][0]) diff --git a/pyCHX/v2/_futurepyCHX/chx_xpcs_xsvs_jupyter_V1.py b/pyCHX/v2/_futurepyCHX/chx_xpcs_xsvs_jupyter_V1.py index d133286..a8915d8 100644 --- a/pyCHX/v2/_futurepyCHX/chx_xpcs_xsvs_jupyter_V1.py +++ b/pyCHX/v2/_futurepyCHX/chx_xpcs_xsvs_jupyter_V1.py @@ -374,9 +374,9 @@ def plot_entries_from_uids( return fig, ax -#################################################################################################### -##For real time analysis## -################################################################################################# +# +# For real time analysis# +# def get_iq_from_uids(uids, mask, setup_pargs): @@ -499,7 +499,7 @@ def wait_data_acquistion_finish(uid, wait_time=2, max_try_num=3): FINISH = True print("The data acquistion finished.") print("Starting to do something here...") - except: + except Exception: wait_func(wait_time=wait_time) w += 1 print("Try number: %s" % w) @@ -595,7 +595,7 @@ def do_compress_on_line(start_time, stop_time, mask_dict=None, mask=None, wait_t text="Data are on-line sparsified!", attachments=None, ) - except: + except Exception: print("There are something wrong with this data: %s..." % uid) print("*" * 50) return time.time() - t0 @@ -641,14 +641,14 @@ def realtime_xpcs_analysis( if finish: try: md = get_meta_data(uid) - ##corect some metadata + # corect some metadata if md_update is not None: md.update(md_update) # if 'username' in list(md.keys()): # try: - # md_cor['username'] = md_update['username'] - # except: - # md_cor = None + # md_cor['username'] = md_update['username'] + # except Exception: + # md_cor = None # uid = uid[:8] # print(md_cor) if not emulation: @@ -661,7 +661,7 @@ def realtime_xpcs_analysis( clear_plot=clear_plot, ) # update_olog_uid( uid= md['uid'], text='Data are on-line sparsified!',attachments=None) - except: + except Exception: print("There are something wrong with this data: %s..." % uid) else: print("\nThis is not a XPCS series. 
We will simiply ignore it.") @@ -673,9 +673,9 @@ def realtime_xpcs_analysis( return time.time() - t0 -#################################################################################################### -##compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress## -################################################################################################# +# +# compress multi uids, sequential compress for uids, but for each uid, can apply parallel compress# +# def compress_multi_uids( uids, mask, @@ -747,9 +747,9 @@ def compress_multi_uids( print("Done!") -#################################################################################################### -##get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid ## -################################################################################################# +# +# get_two_time_mulit_uids, sequential cal for uids, but apply parallel for each uid # +# def get_two_time_mulit_uids( @@ -817,7 +817,7 @@ def get_two_time_mulit_uids( data_pixel = Get_Pixel_Arrayc(FD, pixelist, norm=norm).get_data() g12b = auto_two_Arrayc(data_pixel, roi_mask, index=None) np.save(filename, g12b) - del g12b + # print("The two time correlation function for uid={} is saved as {}.".format(uid, filename)) @@ -968,17 +968,17 @@ def get_series_one_time_mulit_uids( try: g2_path = path + uid + "/" g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) - except: + except Exception: g2_path = path + md["uid"] + "/" g12b = np.load(g2_path + "uid=%s_g12b.npy" % uid) try: exp_time = float(md["cam_acquire_time"]) # *1000 #from second to ms - except: + except Exception: exp_time = float(md["exposure time"]) # * 1000 #from second to ms if trans is None: try: transi = md["transmission"] - except: + except Exception: transi = [1] else: transi = trans[i] @@ -1136,7 +1136,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_fit_form = False, run_waterfall = True,#False, run_t_ROI_Inten = True, - #run_fit_g2 = True, + # run_fit_g2 = True, fit_g2_func = 'stretched', run_one_time = True,#False, run_two_time = True,#False, @@ -1156,8 +1156,8 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= num_rings = 12, gap_ring_number = 6, number_rings= 1, - #qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 - #width = 0.0002 + # qcenters = [ 0.00235,0.00379,0.00508,0.00636,0.00773, 0.00902] #in A-1 + # width = 0.0002 qth_interest = 1, #the intested single qth use_sqnorm = False, use_imgsum_norm = True, @@ -1184,21 +1184,21 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_xsvs = run_pargs["run_xsvs"] try: run_dose = run_pargs["run_dose"] - except: + except Exception: run_dose = False - ############################################################### + # if scat_geometry == "gi_saxs": # to be done for other types run_xsvs = False - ############################################################### + # - ############################################################### + # if scat_geometry == "ang_saxs": run_xsvs = False run_waterfall = False run_two_time = False run_four_time = False run_t_ROI_Inten = False - ############################################################### + # if "bin_frame" in list(run_pargs.keys()): bin_frame = run_pargs["bin_frame"] bin_frame_number = run_pargs["bin_frame_number"] @@ -1216,12 +1216,12 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= 
use_imgsum_norm = run_pargs["use_imgsum_norm"] try: use_sqnorm = run_pargs["use_sqnorm"] - except: + except Exception: use_sqnorm = False try: inc_x0 = run_pargs["inc_x0"] inc_y0 = run_pargs["inc_y0"] - except: + except Exception: inc_x0 = None inc_y0 = None @@ -1264,7 +1264,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= try: username = run_pargs["username"] - except: + except Exception: username = getpass.getuser() data_dir0 = os.path.join("/XF11ID/analysis/", CYCLE, username, "Results/") @@ -1334,7 +1334,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= "beam_center_y", ], ) - ## Overwrite Some Metadata if Wrong Input + # Overwrite Some Metadata if Wrong Input dpix, lambda_, Ldet, exposuretime, timeperframe, center = check_lost_metadata( md, Nimg, inc_x0=inc_x0, inc_y0=inc_y0, pixelsize=7.5 * 10 * (-5) ) @@ -1434,7 +1434,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= mask = mask * Chip_Mask # %system free && sync && echo 3 > /proc/sys/vm/drop_caches && free - ## Get bad frame list by a polynominal fit + # Get bad frame list by a polynomial fit bad_frame_list = get_bad_frame_list( imgsum, fit=True, @@ -1447,7 +1447,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= ) print("The bad frame list length is: %s" % len(bad_frame_list)) - ### Creat new mask by masking the bad pixels and get new avg_img + # Create new mask by masking the bad pixels and get new avg_img if False: mask = mask_exclude_badpixel(bp, mask, md["uid"]) avg_img = get_avg_imgc(FD, sampling=1, bad_frame_list=bad_frame_list) @@ -1482,16 +1482,16 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= path=data_dir, ) - ############for SAXS and ANG_SAXS (Flow_SAXS) + # for SAXS and ANG_SAXS (Flow_SAXS) if scat_geometry == "saxs" or scat_geometry == "ang_saxs": # show_saxs_qmap( avg_img, setup_pargs, width=600, vmin=.1, vmax=np.max(avg_img*.1), logs=True, - # image_name= uidstr + '_img_avg', save=True) + # image_name= uidstr + '_img_avg', save=True) # np.save( data_dir + 'uid=%s--img-avg'%uid, avg_img) # try: - # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) - # except: - # hmask=1 + # hmask = create_hot_pixel_mask( avg_img, threshold = 1000, center=center, center_radius= 600) + # except Exception: + # hmask=1 hmask = 1 qp_saxs, iq_saxs, q_saxs = get_circular_average( avg_img * Chip_Mask, @@ -1510,7 +1510,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= ) # pd = trans_data_to_pd( np.where( hmask !=1), - # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') + # label=[md['uid']+'_hmask'+'x', md['uid']+'_hmask'+'y' ], dtype='list') # pd.to_csv('/XF11ID/analysis/Commissioning/eiger4M_badpixel.csv', mode='a' ) @@ -1631,9 +1631,9 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= qrt_pds = get_t_qrc(FD, time_edge, Qr, Qz, qr_map, qz_map, path=data_dir, uid=uidstr) plot_qrt_pds(qrt_pds, time_edge, qz_index=0, uid=uidstr, path=data_dir) - ############################## - ##the below works for all the geometries - ######################################## + # + # the below works for all the geometries + # if scat_geometry != "ang_saxs": roi_inten = check_ROI_intensity( avg_img, @@ -1761,7 +1761,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= # if run_one_time: # plot_g2_general(
g2_dict={1:g2}, taus_dict={1:taus},vlim=[0.95, 1.05], qval_dict = qval_dict, fit_res= None, - # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') + # geometry='saxs',filename=uid_+'--g2',path= data_dir, ylabel='g2') plot_g2_general( g2_dict={1: g2, 2: g2_fit}, @@ -2116,7 +2116,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= N = len(imgs) try: tr = md["transmission"] - except: + except Exception: tr = 1 if "dose_frame" in list(run_pargs.keys()): dose_frame = run_pargs["dose_frame"] @@ -2164,7 +2164,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= times_xsvs = exposuretime + (2 ** (np.arange(len(time_steps))) - 1) * timeperframe print("The max counts are: %s" % max_cts) - ### Do historam + # Do histogram if roi_avg is None: times_roi, mean_int_sets = cal_each_ring_mean_intensityc( FD, @@ -2255,7 +2255,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= path=data_dir, ) - ### Get contrast + # Get contrast contrast_factorL = get_contrast(ML_val) spec_km_pds = save_KM( spec_kmean, @@ -2511,7 +2511,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= run_dose=run_dose, report_type=scat_geometry, ) - ## Attach the PDF report to Olog + # Attach the PDF report to Olog if att_pdf_report: os.environ["HTTPS_PROXY"] = "https://proxy:8888" os.environ["no_proxy"] = "cs.nsls2.local,localhost,127.0.0.1" @@ -2519,7 +2519,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= atch = [Attachment(open(pname, "rb"))] try: update_olog_uid(uid=md["uid"], text="Add XPCS Analysis PDF Report", attachments=atch) - except: + except Exception: print( "I can't attach this PDF: %s due to a duplicated filename. Please give a different PDF file."
% pname @@ -2528,7 +2528,7 @@ def run_xpcs_xsvs_single(uid, run_pargs, md_cor=None, return_res=False, reverse= if show_plot: plt.show() # else: - # plt.close('all') + # plt.close('all') if clear_plot: plt.close("all") if return_res: diff --git a/pyCHX/v2/_futurepyCHX/movie_maker.py b/pyCHX/v2/_futurepyCHX/movie_maker.py index bade9de..ac2a91a 100644 --- a/pyCHX/v2/_futurepyCHX/movie_maker.py +++ b/pyCHX/v2/_futurepyCHX/movie_maker.py @@ -1,6 +1,6 @@ -################################ -######Movie_maker############### -################################ +# +# Movie_maker# +# def read_imgs(inDir): @@ -33,7 +33,7 @@ def select_regoin( try: img_[ys:ye, xs:xe] = True - except: + except Exception: img_[ys:ye, xs:xe, :] = True pixellist_ = np.where(img_.ravel())[0] # pixellist_ = img_.ravel() @@ -49,7 +49,7 @@ def select_regoin( else: try: imgx = img[ys:ye, xs:xe] - except: + except Exception: imgx = img[ys:ye, xs:xe, :] return imgx @@ -89,7 +89,7 @@ def save_png_series( save png files """ - if uid == None: + if uid is None: uid = "uid" num_frame = 0 for img in imgs: @@ -191,8 +191,8 @@ def movie_maker( Returns ------- - #ani : - # movie + # ani : + # movie """ diff --git a/pyCHX/v2/_futurepyCHX/xpcs_timepixel.py b/pyCHX/v2/_futurepyCHX/xpcs_timepixel.py index 264da7e..690acc4 100644 --- a/pyCHX/v2/_futurepyCHX/xpcs_timepixel.py +++ b/pyCHX/v2/_futurepyCHX/xpcs_timepixel.py @@ -69,7 +69,7 @@ def get_timepixel_data(data_dir, filename, time_unit=1): """ data = pds.read_csv(data_dir + filename) - #'#Col', ' #Row', ' #ToA', + # '#Col', ' #Row', ' #ToA', # return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps if time_unit != 1: try: @@ -78,7 +78,7 @@ def get_timepixel_data(data_dir, filename, time_unit=1): np.array(data["#Row"]), np.array(data["#ToA"]) * time_unit, ) - except: + except Exception: x, y, t = ( np.array(data["#Col"]), np.array(data[" #Row"]), @@ -91,7 +91,7 @@ def get_timepixel_data(data_dir, filename, time_unit=1): np.array(data["#Row"]), np.array(data["#ToA"]), ) - except: + except Exception: x, y, t = ( np.array(data["#Col"]), np.array(data[" #Row"]), @@ -175,7 +175,7 @@ def get_FD_end_num(FD, maxend=1e10): for i in range(0, int(maxend)): try: FD.seekimg(i) - except: + except Exception: N = i break FD.seekimg(0) @@ -313,8 +313,8 @@ def init_compress_timepix_data(pos, t, binstep, filename, mask=None, md=None, no # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -408,8 +408,8 @@ def init_compress_timepix_data_light_duty( # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -496,8 +496,8 @@ def compress_timepix_data_old(data_pixel, filename, rois=None, md=None, nobytes= # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 'cols_begin', 
'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -548,8 +548,8 @@ def __init__( """ indexable: a images sequences pixelist: 1-D array, interest pixel list - #flat_correction, normalized by flatfield - #norm, normalized by total intensity, like a incident beam intensity + # flat_correction, normalized by flatfield + # norm, normalized by total intensity, like a incident beam intensity """ self.hitime = hitime self.tbins = tbins @@ -728,7 +728,7 @@ def get_timepixel_g2(oned_count): return np.correlate(oned_count, oned_count, mode="full")[-n:] / norm -######################################### +# T = True F = False @@ -816,7 +816,7 @@ def make_qlist(self): qlist[1::2] = round(qradi + (1 + qwidth) / 2) # render odd value qlist[::2] = int_(qradi - qwidth / 2) # render even value qlist[1::2] = int_(qradi + (1 + qwidth) / 2) # render odd value - if qlist_ != None: + if qlist_ is not None: qlist = qlist_ return qlist, qradi @@ -864,9 +864,9 @@ def calqlist(self, qmask=None, shape="circle"): nopr, bins = histogram(qind, bins=range(len(qradi) + 1)) return qind, pixellist, nopr, nopixels - ########################################################################### - ########for one_time correlation function for xyt frames - ################################################################## + # + # for one_time correlation function for xyt frames + # def autocor_xytframe(self, n): """Do correlation for one xyt frame--with data name as n""" @@ -932,7 +932,7 @@ def show(self, g2p, title): plt.show() -###################################################### +# if False: xp = xpcs() @@ -943,6 +943,6 @@ def show(self, g2p, title): g2 = xp.autocor(fnum) filename = "g2_-%s-" % (fnum) save(RES_DIR + FOUT + filename, g2) - ##g2= load(RES_DIR + FOUT + filename +'.npy') + # g2= load(RES_DIR + FOUT + filename +'.npy') g2p = xp.g2_to_pds(dly, g2, tscale=20) xp.show(g2p, "g2_run_%s" % fnum) diff --git a/pyCHX/xpcs_timepixel.py b/pyCHX/xpcs_timepixel.py index 85080c5..81a9b9b 100644 --- a/pyCHX/xpcs_timepixel.py +++ b/pyCHX/xpcs_timepixel.py @@ -10,52 +10,24 @@ import numpy as np import pandas as pds from numpy import ( - apply_over_axes, arange, - arctan, - around, - array, digitize, dot, - exp, histogram, - histogramdd, hstack, hypot, indices, int_, intersect1d, linspace, - load, - log, - log10, - ma, - mean, - mgrid, - ones, - pi, - poly1d, - polyfit, - power, - ravel, - reshape, round, save, - shape, - sin, - sqrt, - std, - sum, - unique, - vstack, where, zeros, zeros_like, ) -from numpy.linalg import lstsq from tqdm import tqdm -from pyCHX.chx_compress import Multifile, go_through_FD, pass_FD from pyCHX.chx_libs import multi_tau_lags @@ -69,17 +41,17 @@ def get_timepixel_data(data_dir, filename, time_unit=1): """ data = pds.read_csv(data_dir + filename) - #'#Col', ' #Row', ' #ToA', + # '#Col', ' #Row', ' #ToA', # return np.array( data['Col'] ), np.array(data['Row']), np.array(data['GlobalTimeFine']) #*6.1 #in ps if time_unit != 1: try: x, y, t = np.array(data["#Col"]), np.array(data["#Row"]), np.array(data["#ToA"]) * time_unit - except: + except Exception: x, y, t = np.array(data["#Col"]), np.array(data[" #Row"]), np.array(data[" #ToA"]) * time_unit else: try: x, y, t = np.array(data["#Col"]), np.array(data["#Row"]), np.array(data["#ToA"]) - except: + except Exception: x, y, t = np.array(data["#Col"]), np.array(data[" #Row"]), np.array(data[" #ToA"]) return x, y, t - t.min() # * 25/4096. 
#in ns @@ -159,7 +131,7 @@ def get_FD_end_num(FD, maxend=1e10): for i in range(0, int(maxend)): try: FD.seekimg(i) - except: + except Exception: N = i break FD.seekimg(0) @@ -278,8 +250,8 @@ def init_compress_timepix_data(pos, t, binstep, filename, mask=None, md=None, no # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -375,8 +347,8 @@ def init_compress_timepix_data_light_duty( # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -465,8 +437,8 @@ def compress_timepix_data_old(data_pixel, filename, rois=None, md=None, nobytes= # TODList: for different detector using different md structure, March 2, 2017, # 8d include, - #'bytes', 'nrows', 'ncols', (detsize) - #'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) + # 'bytes', 'nrows', 'ncols', (detsize) + # 'rows_begin', 'rows_end', 'cols_begin', 'cols_end' (roi) Header = struct.pack( "@16s8d7I916x", b"Version-COMPtpx1", @@ -507,8 +479,8 @@ def __init__( """ indexable: a images sequences pixelist: 1-D array, interest pixel list - #flat_correction, normalized by flatfield - #norm, normalized by total intensity, like a incident beam intensity + # flat_correction, normalized by flatfield + # norm, normalized by total intensity, like a incident beam intensity """ self.hitime = hitime self.tbins = tbins @@ -611,7 +583,7 @@ def get_timepixel_avg_image(x, y, t, det_shape=[256, 256], delta_time=None): """ - t0 = t.min() + t.min() tm = t.max() if delta_time is not None: @@ -687,7 +659,7 @@ def get_timepixel_g2(oned_count): return np.correlate(oned_count, oned_count, mode="full")[-n:] / norm -######################################### +# T = True F = False @@ -775,7 +747,7 @@ def make_qlist(self): qlist[1::2] = round(qradi + (1 + qwidth) / 2) # render odd value qlist[::2] = int_(qradi - qwidth / 2) # render even value qlist[1::2] = int_(qradi + (1 + qwidth) / 2) # render odd value - if qlist_ != None: + if qlist_ is not None: qlist = qlist_ return qlist, qradi @@ -808,7 +780,7 @@ def calqlist(self, qmask=None, shape="circle"): else: pass r = r.flatten() - noqrs = len(qlist) + len(qlist) qind = digitize(r, qlist) if qmask is None: w_ = where((qind) % 2) # qind should be odd;print 'Yes' @@ -823,9 +795,9 @@ def calqlist(self, qmask=None, shape="circle"): nopr, bins = histogram(qind, bins=range(len(qradi) + 1)) return qind, pixellist, nopr, nopixels - ########################################################################### - ########for one_time correlation function for xyt frames - ################################################################## + # + # for one_time correlation function for xyt frames + # def autocor_xytframe(self, n): """Do correlation for one xyt frame--with data name as n""" @@ -891,7 +863,7 @@ def show(self, g2p, title): plt.show() -###################################################### +# if False: xp = xpcs() @@ -902,6 +874,6 @@ def show(self, g2p, title): g2 = xp.autocor(fnum) filename = "g2_-%s-" % (fnum) save(RES_DIR + 
FOUT + filename, g2) - ##g2= load(RES_DIR + FOUT + filename +'.npy') + # g2= load(RES_DIR + FOUT + filename +'.npy') g2p = xp.g2_to_pds(dly, g2, tscale=20) xp.show(g2p, "g2_run_%s" % fnum) diff --git a/run_tests.py b/run_tests.py index 15a620f..660e8ef 100644 --- a/run_tests.py +++ b/run_tests.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -import os import sys import pytest diff --git a/versioneer.py b/versioneer.py index 1cb0d80..7d475a6 100644 --- a/versioneer.py +++ b/versioneer.py @@ -23,13 +23,13 @@ system, and maybe making new tarballs. -## Quick Install +# Quick Install * `pip install versioneer` to somewhere to your $PATH * add a `[versioneer]` section to your setup.cfg (see below) * run `versioneer install` in your source tree, commit the results -## Version Identifiers +# Version Identifiers Source trees come from a variety of places: @@ -66,7 +66,7 @@ * to allow the module to self-identify its version: `myproject.__version__` * to choose a name and prefix for a 'setup.py sdist' tarball -## Theory of Operation +# Theory of Operation Versioneer works by adding a special `_version.py` file into your source tree, where your `__init__.py` can import it. This `_version.py` knows how to @@ -84,7 +84,7 @@ sdist` to replace `_version.py` with a small static file that contains just the generated version data. -## Installation +# Installation First, decide on values for the following configuration variables: @@ -185,7 +185,7 @@ `versioneer install` will mark everything it touched for addition using `git add`. Don't forget to add `setup.py` and `setup.cfg` too. -## Post-Installation Usage +# Post-Installation Usage Once established, all uses of your tree from a VCS checkout should get the current version string. All generated tarballs should include an embedded @@ -206,7 +206,7 @@ Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at least one tag in its history. -## Version-String Flavors +# Version-String Flavors Code which uses Versioneer can learn about its version string at runtime by importing `_version` from your main `__init__.py` file and running the @@ -248,7 +248,7 @@ __version__ = get_versions()['version'] del get_versions -## Styles +# Styles The setup.cfg `style=` configuration controls how the VCS information is rendered into a version string. @@ -266,7 +266,7 @@ Other styles are available. See details.md in the Versioneer source tree for descriptions. -## Debugging +# Debugging Versioneer tries to avoid fatal errors: if something goes wrong, it will tend to return a version of "0+unknown". To investigate the problem, run `setup.py @@ -274,7 +274,7 @@ display the full contents of `get_versions()` (including the `error` string, which may help identify what went wrong). -## Updating Versioneer +# Updating Versioneer To upgrade your project to a new release of Versioneer, do the following: @@ -285,7 +285,7 @@ `SRC/_version.py` * commit any changed files -### Upgrading to 0.15 +# Upgrading to 0.15 Starting with this version, Versioneer is configured with a `[versioneer]` section in your `setup.cfg` file. Earlier versions required the `setup.py` to @@ -298,7 +298,7 @@ install`. In 0.14 and earlier, the executable was named `versioneer-installer` and was run without an argument. -### Upgrading to 0.14 +# Upgrading to 0.14 0.14 changes the format of the version string. 0.13 and earlier used hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a @@ -306,17 +306,17 @@ components, like "0.11+2.g1076c97".
PEP440-strict tools did not like the old format, but should be ok with the new one. -### Upgrading from 0.11 to 0.12 +# Upgrading from 0.11 to 0.12 Nothing special. -### Upgrading from 0.10 to 0.11 +# Upgrading from 0.10 to 0.11 You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running `setup.py setup_versioneer`. This will enable the use of additional version-control systems (SVN, etc) in the future. -## Future Directions +# Future Directions This tool is designed to make it easily extended to other version-control systems: all VCS-specific components are in separate directories like @@ -330,7 +330,7 @@ number of intermediate scripts. -## License +# License To make Versioneer easier to embed, all its code is hereby released into the public domain. The `_version.py` that it creates is also in the public @@ -1449,12 +1449,12 @@ def run(self): # we override "build_py" in both distutils and setuptools # # most invocation pathways end up running build_py: - # distutils/build -> build_py - # distutils/install -> distutils/build ->.. - # setuptools/bdist_wheel -> distutils/install ->.. - # setuptools/bdist_egg -> distutils/install_lib -> build_py - # setuptools/install -> bdist_egg ->.. - # setuptools/develop -> ? + # distutils/build -> build_py + # distutils/install -> distutils/build ->.. + # setuptools/bdist_wheel -> distutils/install ->.. + # setuptools/bdist_egg -> distutils/install_lib -> build_py + # setuptools/install -> bdist_egg ->.. + # setuptools/develop -> ? from distutils.command.build_py import build_py as _build_py @@ -1562,12 +1562,12 @@ def make_release_tree(self, base_dir, files): # resulting files. [versioneer] -#VCS = git -#style = pep440 -#versionfile_source = -#versionfile_build = -#tag_prefix = -#parentdir_prefix = +# VCS = git +# style = pep440 +# versionfile_source = +# versionfile_build = +# tag_prefix = +# parentdir_prefix = """
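A note on the pattern that dominates this patch: every bare `except:` above becomes `except Exception:` (flake8 E722). The distinction is behavioral, not cosmetic: a bare `except` also traps `BaseException` subclasses such as `KeyboardInterrupt` and `SystemExit`, so a Ctrl-C during a long reduction run could be silently swallowed by a metadata fallback. A minimal sketch of the patched pattern, reusing metadata keys that appear in the hunks above (the helper function itself is hypothetical, not part of pyCHX):

def get_exposure_time(md):
    """Prefer the detector's count_time, falling back to cam_acquire_time.
    Only real errors (KeyError, TypeError, ...) are caught; Ctrl-C and
    SystemExit still propagate, which a bare except would suppress."""
    try:
        return float(md["count_time"])
    except Exception:
        return float(md["cam_acquire_time"])

print(get_exposure_time({"count_time": 0.00134}))        # -> 0.00134
print(get_exposure_time({"cam_acquire_time": 0.00135}))  # -> 0.00135 (fallback)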
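Likewise, the `== None` -> `is None` rewrites (E711), e.g. in `make_qlist` and `save_png_series`, matter once NumPy arrays are involved: `==` dispatches to the array's elementwise `__eq__`, so truth-testing the result raises, whereas `is` always returns a single bool. A short, self-contained demonstration:

import numpy as np

qlist_ = np.arange(4)
print(qlist_ == None)  # elementwise __eq__: [False False False False]
print(qlist_ is None)  # identity test: False (a single bool, always safe)
try:
    if qlist_ == None:  # the pre-patch spelling; ambiguous for arrays
        pass
except ValueError as err:
    print(err)  # "The truth value of an array with more than one element is ambiguous..."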
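Finally, for context on the `get_xsvs_fit`/`nbinom_dist` hunks: in XSVS the photon counts k of a ROI are fitted with a negative-binomial distribution parameterized by the mean count K and the number of coherent modes M, with speckle contrast beta = 1/M. A self-contained sketch of that density under those assumptions -- illustrative only, not pyCHX's exact implementation or signature:

import numpy as np
from scipy.special import gammaln

def nbinom_pmf(k, K, M):
    """P(k) = Gamma(k+M) / (Gamma(k+1) Gamma(M)) * (K/(K+M))**k * (M/(K+M))**M,
    the photon-count distribution the fits above optimize over M (and K).
    Computed in log space via gammaln for numerical stability."""
    k = np.asarray(k, dtype=float)
    log_p = (gammaln(k + M) - gammaln(k + 1) - gammaln(M)
             + k * np.log(K / (K + M)) + M * np.log(M / (K + M)))
    return np.exp(log_p)

k = np.arange(20)
print(nbinom_pmf(k, K=2.0, M=4.0).sum())  # ~1.0 once the tail is included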