diff --git a/rsciio/bruker/_api.py b/rsciio/bruker/_api.py
index 852daea8..e96455a7 100644
--- a/rsciio/bruker/_api.py
+++ b/rsciio/bruker/_api.py
@@ -1,7 +1,7 @@
 # -*- coding: utf-8 -*-
 #
-# Copyright 2016 Petras Jokubauskas
-# Copyright 2016 The HyperSpy developers
+# Copyright 2023 Petras Jokubauskas
+# Copyright 2023 The HyperSpy developers
 #
 # This library is free software: you can redistribute it and/or modify
 # it under the terms of the GNU General Public License as published by
@@ -18,10 +18,10 @@
 # If not, see <https://www.gnu.org/licenses/>.
 #
 # This python library subset provides read functionality of
-# Bruker bcf files.
+# Bruker bcf and spx files.
 # The basic reading capabilities of proprietary AidAim Software(tm)
-# SFS (Single File System) (used in bcf technology) is present in
-# the same library.
+# SFS (Single File System) used in bcf technology is exposed for
+# possible reuse with other Bruker files.
 
 
 # Plugin characteristics
@@ -30,7 +30,7 @@
 from math import ceil
 import logging
 from zlib import decompress as unzip_block
-from struct import unpack as strct_unp
+from struct import unpack
 from datetime import datetime
 from ast import literal_eval
 import codecs
@@ -46,6 +46,12 @@
 from rsciio.docstrings import FILENAME_DOC, LAZY_DOC, RETURNS_DOC
 
+__all__ = [
+    "BCFReader",
+    "SFSReader",
+    "spectra_from_xml",
+]
+
 _logger = logging.getLogger(__name__)
 
 warn_once = True
@@ -71,22 +77,23 @@ class Container(object):
 
 
 class SFSTreeItem(object):
-    """Class to manage one internal sfs file.
-
-    Reading, reading in chunks, reading and extracting, reading without
-    extracting even if compression is pressent.
+    """Class to manage one internal SFS file.
+    This class provides means to read, read in blocks, read and extract
+    compressed blocks, and read without extracting even if compression
+    is present (for debugging purposes).
 
     Attributes:
     item_raw_string -- the bytes from sfs file table describing the file
-    parent -- the item higher hierarchicaly in the sfs file tree
+    parent -- the index of the parent item in the SFS file table.
+        The index of the root is -1.
 
     Methods:
     read_piece, setup_compression_metadata, get_iter_and_properties,
     get_as_BytesIO_string
     """
 
-    def __init__(self, item_raw_string, parent):
-        self.sfs = parent
+    def __init__(self, item_raw_string, sfs):
+        self.sfs = sfs
         (
             self._pointer_to_pointer_table,
             self.size,
@@ -100,7 +107,7 @@ def __init__(self, item_raw_string, parent):
             _,
             name,
             _,
-        ) = strct_unp("
+        ) = unpack("
 
     def _calc_pointer_table_size(self):
         n_chunks = ceil(self.size / self.sfs.usable_chunk)
@@ -119,13 +131,10 @@ def _fill_pointer_table(self):
         self.pointer is the sfs pointer table containing addresses of
         every chunk of the file.
 
-        The pointer table if the file is big can extend throught many
-        sfs chunks. Differently than files, the pointer table of file have no
-        table of pointers to the chunks. Instead if pointer table is larger
-        than sfs chunk, the chunk header contains next chunk number (address
-        can be calculated using known chunk size and global offset) with
-        continuation of file pointer table, thus it have to be read and filled
-        consecutive.
+        The pointer table, if the internal file is large enough, can extend
+        through many SFS blocks. If the pointer table or its continuation
+        does not fit inside a single SFS block, the block's header contains
+        the index of the next block.
""" # table size in number of chunks: n_of_chunks = ceil(self.size_in_chunks / (self.sfs.usable_chunk // 4)) @@ -135,7 +144,7 @@ def _fill_pointer_table(self): temp_string = io.BytesIO() for dummy1 in range(n_of_chunks): fn.seek(self.sfs.chunksize * next_chunk + 0x118) - next_chunk = strct_unp("= size: @@ -1133,7 +1178,7 @@ def py_parse_hypermap(virtual_file, shape, dtype, downsample=1): dummy_size1, n_of_pulses, data_size2, - ) = strct_unp("> 4) + gain pixel += g[:channels] else: length = int(channels * size_p / 2) - temp = strct_unp( + temp = unpack( "<" + channels * st[size_p], buffer1[offset : offset + length], ) @@ -1211,14 +1256,14 @@ def py_parse_hypermap(virtual_file, shape, dtype, downsample=1): pixel += rest * [0] # additional data size: if n_of_pulses > 0: - add_s = strct_unp("= size: buffer1 = buffer1[offset:] + next(iter_data) size = size_chnk + size - offset offset = 0 # the additional pulses: - add_pulses = strct_unp( + add_pulses = unpack( "<" + "H" * n_of_pulses, buffer1[offset : offset + add_s] ) offset += add_s @@ -1352,7 +1397,7 @@ def bcf_reader( """ # objectified bcf file: - obj_bcf = BCF_reader(filename, instrument=instrument) + obj_bcf = BCFReader(filename, instrument=instrument) if select_type == "image": return bcf_images(obj_bcf) elif select_type == "spectrum_image": diff --git a/rsciio/bruker/api.py b/rsciio/bruker/api.py new file mode 100644 index 00000000..24a99282 --- /dev/null +++ b/rsciio/bruker/api.py @@ -0,0 +1 @@ +from ._api import * diff --git a/rsciio/tests/test_bruker.py b/rsciio/tests/test_bruker.py index ea224330..50d1a47f 100644 --- a/rsciio/tests/test_bruker.py +++ b/rsciio/tests/test_bruker.py @@ -239,11 +239,11 @@ def test_wrong_file(): def test_fast_bcf(): thingy = pytest.importorskip("rsciio.bruker.unbcf_fast") - from rsciio.bruker import _api + from rsciio.bruker import _api, api for bcffile in test_files: filename = os.path.join(my_path, "bruker_data", bcffile) - thingy = _api.BCF_reader(filename) + thingy = api.BCFReader(filename) for j in range(2, 5, 1): print("downsampling:", j) _api.fast_unbcf = True # manually enabling fast parsing