From 77be93258fcc4e26bed86e14a90f39bb965e105d Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 15 Feb 2024 13:44:52 -0500 Subject: [PATCH 01/38] ignore local testing script --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 1e681b8..92f7be3 100644 --- a/.gitignore +++ b/.gitignore @@ -11,4 +11,5 @@ build *.egg-info *.egg-info/* .DS_Store -tests/tutorials \ No newline at end of file +tests/tutorials +_test.py From 2d2f8fa35a479613dc4ae25ca4cf96244790ccb6 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 15 Feb 2024 13:54:05 -0500 Subject: [PATCH 02/38] ignore local test notebook --- .gitignore | 1 + 1 file changed, 1 insertion(+) diff --git a/.gitignore b/.gitignore index 92f7be3..4cb2e2b 100644 --- a/.gitignore +++ b/.gitignore @@ -13,3 +13,4 @@ build .DS_Store tests/tutorials _test.py +_*.ipynb \ No newline at end of file From 3cfc0cf5fc77feca7bd6c25e91e9f862b72c6d88 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Tue, 20 Feb 2024 17:13:51 -0500 Subject: [PATCH 03/38] object-oriented PvDataset under api module --- brkraw/api/__init__.py | 7 + brkraw/api/pvobj/__init__.py | 3 + brkraw/api/pvobj/dataset.py | 289 ++++++++++++++++++++++ brkraw/api/pvobj/parser.py | 456 +++++++++++++++++++++++++++++++++++ 4 files changed, 755 insertions(+) create mode 100755 brkraw/api/__init__.py create mode 100755 brkraw/api/pvobj/__init__.py create mode 100755 brkraw/api/pvobj/dataset.py create mode 100755 brkraw/api/pvobj/parser.py diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py new file mode 100755 index 0000000..cabd18a --- /dev/null +++ b/brkraw/api/__init__.py @@ -0,0 +1,7 @@ +import pvobj + +__all__ = ['pvobj', 'BrukerLoader'] + +class BrukerLoader: + def __init__(self, path): + self._pvobj = pvobj.PvDataset(path) diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py new file mode 100755 index 0000000..cf2f931 --- /dev/null +++ b/brkraw/api/pvobj/__init__.py @@ -0,0 +1,3 @@ +from dataset import PvDataset + +__all__ = ["PvDataset"] \ No newline at end of file diff --git a/brkraw/api/pvobj/dataset.py b/brkraw/api/pvobj/dataset.py new file mode 100755 index 0000000..819c715 --- /dev/null +++ b/brkraw/api/pvobj/dataset.py @@ -0,0 +1,289 @@ +import os +import re +import zipfile +from collections import OrderedDict +try: + from .parser import Parameter +except ImportError: + # case for debugging + from brkraw.api.pvobj.parser import Parameter + +class BaseMethods: + _scan_id = None + _reco_id = None + _path = None + _rootpath = None + _contents = None + _parameter_files = None + _binary_files = None + + @staticmethod + def _fetch_dir(path): + """Searches for directories and files in a given directory and returns the directory structure. + + Args: + path: The path to the directory. + + Returns: + dict: A dictionary representing the directory structure. + The keys are the relative paths of the directories, and the values are dictionaries with the following keys: + - 'dirs': A list of directory names. + - 'files': A list of file names. + - 'file_indexes': An empty list. 
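
            For illustration only (directory and file names below are
            hypothetical), a dataset with a single scan folder could yield:

                {'.': {'dirs': ['1'], 'files': ['subject'], 'file_indexes': []},
                 '1': {'dirs': ['pdata'], 'files': ['acqp', 'method'], 'file_indexes': []}}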
+ """ + contents = OrderedDict() + abspath = os.path.abspath(path) + for dirpath, dirnames, filenames in os.walk(abspath): + normalized_dirpath = os.path.normpath(dirpath) + relative_path = os.path.relpath(normalized_dirpath, abspath) + contents[relative_path] = {'dirs': dirnames, 'files': filenames, 'file_indexes': []} + return contents + + @staticmethod + def _fetch_zip(path): + """Searches for files in a zip file and returns the directory structure and file information. + + Args: + path: The path to the zip file. + + Returns: + dict: A dictionary representing the directory structure and file information. + The keys are the directory paths, and the values are dictionaries with the following keys: + - 'dirs': A set of directory names. + - 'files': A list of file names. + - 'file_indexes': A list of file indexes. + """ + with zipfile.ZipFile(path) as zip_file: + all_paths = {os.path.dirname(item.filename) for item in zip_file.infolist() if item.is_dir()} + contents = OrderedDict({path: {'dirs': set(), 'files': [], 'file_indexes': []} for path in all_paths}) + for i, item in enumerate(zip_file.infolist()): + if not item.is_dir(): + dirpath, filename = os.path.split(item.filename) + contents[dirpath]['files'].append(filename) + contents[dirpath]['file_indexes'].append(i) + # Add missing parent directories + parent_path = dirpath + while parent_path != '': + parent_path = os.path.dirname(parent_path) + if parent_path not in contents: + contents[parent_path] = {'dirs': set(), 'files': [], 'file_indexes': []} + for sub_path in all_paths: + parent_path, dirname = os.path.split(sub_path.rstrip('/')) + if parent_path in contents: + contents[parent_path]['dirs'].add(dirname) + return contents + + def _open_as_fileobject(self, key): + """Opens a file object for the given key. + + Args: + key: The key to identify the file. + + Returns: + file object: The opened file object. + + Raises: + ValueError: If the key does not exist in the files. + """ + contents = self._contents if 'files' in self._contents else self._contents[list(self._contents.keys())[0]] + rootpath = self._rootpath if 'files' in self._contents else self._path + files = contents.get('files') + + if key not in files: + raise ValueError(f'file not exists. [{",".join(files)}]') + + if file_indexes := contents.get('file_indexes'): + with zipfile.ZipFile(rootpath) as zf: + idx = file_indexes[files.index(key)] + return zf.open(zf.namelist()[idx]) + else: + path_list = [rootpath, *(str(self._scan_id) if self._scan_id else []), *(['pdata', str(self._reco_id)] if self._reco_id else []), key] + path = os.path.join(*path_list) + return open(path, 'rb') + + def _open_as_binary(self, key): + """Opens a file as binary and reads its contents. + + Args: + key: The key to identify the file. + + Returns: + bytes: The binary contents of the file. + """ + return self._open_as_fileobject(key).read() + + def _open_as_string(self, key): + """Opens a file as binary, decodes it as UTF-8, and splits it into lines. + + Args: + key: The key to identify the file. + + Returns: + list: The lines of the file as strings. + """ + return self._open_as_binary(key).decode('UTF-8').split('\n') + + def __getitem__(self, key): + """Returns the value associated with the given key. + + Args: + key: The key to retrieve the value. + + Returns: + object: The value associated with the key. + + Raises: + KeyError: If the key is not found. 
+ """ + if key in self._parameter_files: + return Parameter(self._open_as_string(key), name=key, scan_id=self._scan_id, reco_id=self._reco_id) + elif key in self._binary_files: + return self._open_as_binary(key) + else: + return self._open_as_fileobject(key) + + +class PvDataset(BaseMethods): + def __init__(self, path): + self._check_dataset_validity(path) + self._construct() + if root_content := [c for c in self._contents.values() if 'subject' in c['files']]: + setattr(self, 'subject', root_content) + + # internal method + def _check_dataset_validity(self, path): + """ + Checks the validity of a given dataset path. + + Note: This method only checks the validity of the dataset to be fetched using `fetch_dir` and `fetch_zip`, + and does not check the validity of a `PvDataset`. + + Args: + path (str): The path to check. + + Raises: + FileNotFoundError: If the path does not exist. + ValueError: If the path is not a directory or a file, or if it does not meet the required criteria. + + Returns: + None + """ + self._path = os.path.abspath(path) + if not os.path.exists(self._path): + raise FileNotFoundError(f"The path '{self._path}' does not exist.") + if os.path.isdir(self._path): + self._contents = self._fetch_dir(self._path) + self.is_compressed = False + elif os.path.isfile(self._path) and zipfile.is_zipfile(self._path): + self._contents = self._fetch_zip(self._path) + self.is_compressed = True + else: + raise ValueError(f"The path '{self._path}' does not meet the required criteria.") + + def _construct(self): + """ + Constructs the object by organizing the contents. + + This method constructs the object by organizing the contents based on the provided directory structure. + It iterates over the sorted contents and updates the `_scans` and `_backup` dictionaries accordingly. + After processing, it removes the processed paths from the `_contents` dictionary. + + Returns: + None + """ + self._scans = OrderedDict() + self._backup = OrderedDict() + to_remove = [] + for path, contents in sorted(self._contents.items()): + if not path: + self._root = contents + to_remove.append(path) + else: + if matched := re.match(r'(?:.*/)?(\d+)/pdata/(\d+)$', path) or re.match( + r'(?:.*/)?(\d+)$', path + ): + scan_id = int(matched.group(1)) + if scan_id not in self._scans: + self._scans[scan_id] = PvScan(scan_id, (self.path, path)) + if 'pdata' in contents['dirs']: + self._scans[scan_id].update(contents) + elif len(matched.groups()) == 2: + reco_id = int(matched.group(2)) + self._scans[scan_id].set_reco(path, reco_id, contents) + + to_remove.append(path) + if not contents['files']: + to_remove.append(path) + elif 'subject' not in contents['files'] and path not in to_remove: + self._backup[path] = contents + to_remove.append(path) + + for path in to_remove: + del self._contents[path] + + @property + def path(self): + """ + Gets the path of the object. + + Returns: + str: The path of the object. 
+ """ + return self._path + + @property + def avail(self): + return list(self._scans) + + def get_scan(self, scan_id): + return self._scans[scan_id] + + def get_reco(self, scan_id, reco_id): + return self.get_scan(scan_id).get_reco(reco_id) + + def __dir__(self): + return ['path', 'avail', 'get_scan', 'get_reco'] + + + +class PvScan(BaseMethods): + def __init__(self, scan_id, pathes, contents=None, recos=None): + self._scan_id = scan_id + self._rootpath, self._path = pathes + self.update(contents) + self._recos = OrderedDict(recos) if recos else OrderedDict() + + def update(self, contents): + self._contents = contents + + def set_reco(self, path, reco_id, contents): + self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents) + + def get_reco(self, reco_id): + return self._recos[reco_id] + + @property + def path(self): + return self._path + + @property + def avail(self): + return list(self._recos) + + def __dir__(self): + return ['path', 'avail', 'get_reco'] + + +class PvReco(BaseMethods): + def __init__(self, scan_id, reco_id, pathes, contents): + self._scan_id = scan_id + self._reco_id = reco_id + self._rootpath, self._path = pathes + self._contents = contents + + @property + def path(self): + return self._path + + def __dir__(self): + return ['path'] \ No newline at end of file diff --git a/brkraw/api/pvobj/parser.py b/brkraw/api/pvobj/parser.py new file mode 100755 index 0000000..59d9994 --- /dev/null +++ b/brkraw/api/pvobj/parser.py @@ -0,0 +1,456 @@ +import re +import numpy as np +from collections import OrderedDict, defaultdict +from copy import copy + +# REGEX patterns +ptrn_param = r'^\#\#(?P.*)\=(?P.*)$' +ptrn_key = r'^\$(?P.*)' +ptrn_array = r"\((?P[^()]*)\)" +ptrn_complex_array = r"\((?P\(.*)\)$" +ptrn_comment = r'\$\$.*' +ptrn_float = r'^-?\d+\.\d+$' +ptrn_engnotation = r'^-?[0-9.]+e-?[0-9.]+$' +ptrn_integer = r'^[-]*\d+$' +ptrn_string = r'^\<(?P[^>]*)\>$' +ptrn_arraystring = r'\<(?P[^>]*)\>[,]*' +ptrn_bisstring = r'\<(?P\$Bis[^>]*)\#\>' +ptrn_braces = r'\((?P[^()]*)\)' +# Paravision 360 related. @[number of repititions]([number]) ex) @5(0) +ptrn_at_array = r'@(\d*)\*\(([-]?\d*[.]?\d*[eE]?[-]?\d*?)\)' + +# Conditional enum +HEADER = 0 +PARAMETER = 1 + + +class Parser: + """ + Parser class for handling parameter dictionaries. + + This class provides methods for loading parameters from a list of strings, converting strings to specific data types, cleaning up array elements, processing complex arrays, parsing shapes, parsing data, parsing array data, and converting data to specified shapes. + + Methods: + load_param: JCAMP DX parser that loads parameters from a list of strings. + convert_string_to: Converts a string to a specific data type if it matches certain patterns. + clean_up_elements_in_array: Cleans up array elements by replacing patterns with repeated values. + process_bisarray: Determines the case of an array with BIS prefix by converting each element to a specific data type. + process_complexarray: Process a complex array and return a parsed dictionary. + process_string: Process a string and return the parsed data based on its shape. + parse_shape: Parse the shape of the data. + parse_data: Parse the data based on its format. + parse_array_data: Parse the array data. + convert_data_to: Convert the given data to the specified shape. + """ + @staticmethod + def load_param(stringlist): + """JCAMP DX parser that loads parameters from a list of strings. + + Args: + stringlist (list): A list of strings containing parameter information. 
+ + Returns: + params (OrderedDict): An ordered dictionary containing the parsed parameters, where the key is the line number and the value is a tuple of the parameter type, key, and value. + param_addresses (list): A list of line numbers where parameters were found. + stringlist (list): The original list of strings. + """ + params = OrderedDict() + param_addresses = [] + compiled_ptrn_param = re.compile(ptrn_param) + compiled_ptrn_key = re.compile(ptrn_key) + + for line_num, line in enumerate(stringlist): + if regex_obj := compiled_ptrn_param.match(line): + key = regex_obj['key'] + value = regex_obj['value'] + if compiled_ptrn_key.match(key): + key = re.sub(ptrn_key, r'\g', key) + params[line_num] = (PARAMETER, key, value) + else: + params[line_num] = (HEADER, key, value) + param_addresses.append(line_num) + return params, param_addresses, stringlist + + + @staticmethod + def convert_string_to(string): + """Converts a string to a specific data type if it matches certain patterns. + + Args: + string (str): The string to be converted. + + Returns: + float, int, or str or None: The converted value of the string, or None if the string is empty. + """ + string = string.strip() + if re.match(ptrn_string, string): + string = re.sub(ptrn_string, r'\g', string) + if not string: + return None + if re.match(ptrn_float, string) or re.match(ptrn_engnotation, string): + return float(string) + elif re.match(ptrn_integer, string): + return int(string) + return string + + @staticmethod + def clean_up_elements_in_array(data): + """Cleans up array elements by replacing patterns with repeated values. + + Args: + elements (list): A list of array elements with patterns. + + Returns: + list: The cleaned up array elements. + """ + elements = re.findall(ptrn_at_array, data) + elements = list(set(elements)) + for str_ptn in elements: + num_cnt = int(str_ptn[0]) + num_repeat = float(str_ptn[1]) + str_ptn = f"@{str_ptn[0]}*({str_ptn[1]})" + + str_replace_old = str_ptn + str_replace_new = [num_repeat for _ in range(num_cnt)] + str_replace_new = str(str_replace_new) + str_replace_new = str_replace_new.replace(",", "") + str_replace_new = str_replace_new.replace("[", "") + str_replace_new = str_replace_new.replace("]", "") + data = data.replace(str_replace_old, str_replace_new) + return data + + @staticmethod + def process_bisarray(elements): + """Determines the case of an array with BIS prefix by converting each element to a specific data type. + + Args: + elements (list): A list of elements representing a bisarray. + + Returns: + float, int, or list: The converted elements of the bisarray. If there is only one element, it is returned as is, otherwise a list of converted elements is returned. + """ + elements = [Parser.convert_string_to(c) for c in elements] + return elements.pop() if len(elements) == 1 else elements + + @staticmethod + def process_complexarray(data): + """ + Process a complex array and return a parsed dictionary. + + Args: + data: The complex array to be processed. + + Returns: + dict: A dictionary containing the parsed data. 
+ + Examples: + >>> data = [1, [2, 3], [[4, 5], [6, 7]]] + >>> process_complexarray(data) + {'level_1': [[1]], 'level_2': [[2, 3]], 'level_3': [[4, 5], [6, 7]]} + """ + data_holder = copy(data) + parser = defaultdict(list) + level = 1 + while re.search(ptrn_braces, data_holder): + for parsed in re.finditer(ptrn_braces, data_holder): + cont_parser = [Parser.convert_data_to(cont.strip(), -1) for cont in parsed.group('contents').split(',') if Parser.convert_data_to(cont.strip(), -1) is not None] + parser[f'level_{level}'].append(cont_parser) + data_holder = re.sub(ptrn_braces, '', data_holder) + level += 1 + return dict(parser) + + @staticmethod + def process_string(data, shape): + """ + Process a string and return the parsed data based on its shape. + + Args: + data: The string to be processed. + shape: The shape of the data. + + Returns: + tuple: A tuple containing the parsed data and an empty string, or the processed string. + + Examples: + >>> data = "[1, 2, 3]" + >>> shape = "(3,)" + >>> process_string(data, shape) + ([1, 2, 3], '') + + >>> data = "Hello, World!" + >>> shape = "" + >>> process_string(data, shape) + 'Hello, World!' + """ + if elements := re.findall(ptrn_bisstring, data): + return Parser.process_bisarray(elements) + + data = Parser.clean_up_elements_in_array(data) + if re.match(ptrn_complex_array, data): + return Parser.process_complexarray(data) + elif re.match(ptrn_string, data): + return re.sub(ptrn_string, r'\g', data) + + shape = Parser.parse_shape(shape) + data = Parser.parse_data(data) + return data + + @staticmethod + def parse_shape(shape): + """ + Parse the shape of the data. + + Args: + shape: The shape of the data. + + Returns: + str: The parsed shape. + + Raises: + ValueError: If the shape is invalid. + + Examples: + >>> shape = "(3, 4)" + >>> parse_shape(shape) + '3, 4' + + >>> shape = "3, 4" + >>> parse_shape(shape) + '3, 4' + + >>> shape = "(3, 4, 5)" + >>> parse_shape(shape) + '3, 4, 5' + + >>> shape = "(3, 4,)" + >>> parse_shape(shape) + ValueError: Invalid shape: (3, 4,) + """ + if shape != -1: + shape = re.sub(ptrn_array, r'\g', shape) + if ',' in shape: + return [Parser.convert_string_to(c) for c in shape.split(',')] + return shape + + @staticmethod + def parse_data(data): + """ + Parse the data based on its format. + + Args: + data: The data to be parsed. + + Returns: + list or str: The parsed data. + + Examples: + >>> data = "[1, 2, 3]" + >>> parse_data(data) + [1, 2, 3] + + >>> data = "1, 2, 3" + >>> parse_data(data) + [1, 2, 3] + + >>> data = "1 2 3" + >>> parse_data(data) + [1, 2, 3] + + >>> data = "Hello, World!" + >>> parse_data(data) + 'Hello, World!' + """ + if matched := re.findall(ptrn_array, data): + return Parser.parse_array_data(matched) + elif ',' in data: + return [Parser.convert_string_to(c) for c in data.split(',')] + elif ' ' in data: + return [Parser.convert_string_to(c) for c in data.split(' ')] + return data + + @staticmethod + def parse_array_data(matched): + """ + Parse the array data. + + Args: + matched: A list of strings representing the matched array data. + + Returns: + list: The parsed array data. + + Examples: + This method is intended to be called internally within the class and does not have direct usage examples. + """ + if any(',' in cell for cell in matched): + return [[Parser.convert_string_to(c) for c in cell.split(',')] for cell in matched] + return [Parser.convert_string_to(c) for c in matched] + + @staticmethod + def convert_data_to(data, shape): + """ + Convert the given data to the specified shape. 
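
        For instance, a whitespace-separated value such as '1 2 3' with
        shape -1 is expected to come back as [1, 2, 3] (an illustration,
        not taken from the original tests).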
+ + Args: + data: The data to be converted. + shape: The desired shape of the data. + + Returns: + object: The converted data. + + Examples: + This method is intended to be called internally within the class and does not have direct usage examples. + """ + if isinstance(data, str): + data = Parser.process_string(data, shape) + if isinstance(data, list): + if ( + isinstance(shape, list) + and not any(isinstance(c, str) for c in data) + and all(c is not None for c in data) + ): + data = np.asarray(data).reshape(shape) + elif isinstance(data, str): + data = Parser.convert_string_to(data) + return data + + +class Parameter: + """ + Paravision Parameter object + + This class extends the Parser class and provides methods to initialize the object with a stringlist of parameter dictionaries, retrieve the parameters and headers, and process the contents of the data. + + Args: + stringlist: A list of strings containing the parameter dictionaries. + + Examples: + >>> stringlist = ["param1", "param2"] + >>> parameter = Parameter(stringlist) + + Attributes: + parameters (property): Get the parameters of the data. + headers (property): Get the headers of the data. + + Methods: + _process_contents: Process the contents of the data based on the given parameters. + _set_param: Set the parameters and headers based on the given data. + """ + def __init__(self, stringlist, name, scan_id=None, reco_id=None): + """ + Initialize the Parameter object with the given stringlist, name, scan_id, and reco_id. + + Args: + stringlist: A list of strings containing the parameter dictionaries. + name: The name of the Parser object. + scan_id: The scan ID associated with the Parser object. + reco_id: The reco ID associated with the Parser object. + + Examples: + >>> stringlist = ["param1", "param2"] + >>> name = "MyParser" + >>> scan_id = 12345 + >>> reco_id = 67890 + >>> parser = Parser(stringlist, name, scan_id, reco_id) + """ + self._name = name + self._repr_items = [] + if scan_id: + self._repr_items.append(f'scan_id={scan_id}') + if reco_id: + self._repr_items.append(f'reco_id={reco_id}') + self._set_param(*Parser.load_param(stringlist)) + + @property + def name(self): + if '_' in self._name: + return ''.join([s.capitalize() for s in self._name.split('_')]) + return self._name.capitalize() + + @property + def parameters(self): + """ + Get the parameters of the data. + + Returns: + OrderedDict: The parameters of the data. + + Examples: + This property can be accessed directly on an instance of the class to retrieve the parameters. + """ + return self._parameters + + @property + def header(self): + """ + Get the headers of the data. + + Returns: + OrderedDict: The headers of the data. + + Examples: + This property can be accessed directly on an instance of the class to retrieve the headers. + """ + return self._header + + def _process_contents(self, contents, addr, addr_diff, index, value): + """ + Process the contents of the data based on the given parameters. + + Args: + contents: The contents of the data. + addr: The address of the current parameter. + addr_diff: The difference in addresses between parameters. + index: The index of the current parameter. + value: The value of the current parameter. + + Returns: + tuple: A tuple containing the processed data and its shape. + + Examples: + This method is intended to be called internally within the class and does not have direct usage examples. 
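
        Note:
            When addr_diff[index] > 1 the value spans multiple lines; those
            lines are joined into a single string (lines starting with '$$'
            are skipped as comments) and parsed later by convert_data_to.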
+ """ + if addr_diff[index] > 1: + c_lines = contents[(addr + 1):(addr + addr_diff[index])] + data = " ".join([line.strip() for line in c_lines if not re.match(ptrn_comment, line)]) + return (data, value) if data else (Parser.convert_string_to(value), -1) + return Parser.convert_string_to(value), -1 + + def _set_param(self, params, param_addr, contents): + """ + Set the parameters and headers based on the given data. + + Args: + params: A list of parameter information. + param_addr: The addresses of the parameters. + contents: The contents of the data. + + Raises: + ValueError: If an invalid dtype is encountered. + + Examples: + This method is intended to be called internally within the class and does not have direct usage examples. + """ + addr_diff = np.diff(param_addr) + self._contents = contents + self._header = OrderedDict() + self._parameters = OrderedDict() + for index, addr in enumerate(param_addr[:-1]): + dtype, key, value = params[addr] + data, shape = self._process_contents(contents, addr, addr_diff, index, value) + + if dtype is PARAMETER: + self._parameters[key] = Parser.convert_data_to(data, shape) + elif dtype is HEADER: + self._header[key] = data + else: + raise ValueError("Invalid dtype encountered in _set_param") + + def __getitem__(self, key): + return self.parameters[key] + + def __repr__(self): + return f"{self.name}({', '.join(self._repr_items)})" + + def keys(self): + return self.parameters.keys() \ No newline at end of file From 2544209797de7eef2b7a90d46e3e287653c29d55 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Wed, 27 Mar 2024 09:09:40 -0400 Subject: [PATCH 04/38] configuration module for brkraw package --- brkraw/config.py | 99 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 99 insertions(+) create mode 100644 brkraw/config.py diff --git a/brkraw/config.py b/brkraw/config.py new file mode 100644 index 0000000..ea51696 --- /dev/null +++ b/brkraw/config.py @@ -0,0 +1,99 @@ +import toml +from pathlib import Path + +class ConfigManager: + """ + Manage the configuration settings. + + Notes: + - Provides methods to ensure the existence of the config directory, load or create the configuration, set configuration values, and retrieve configuration values. + """ + def __init__(self): + """ + Initialize the configuration object. + + Notes: + - Sets up the home directory, config directory, and config file paths. + - Ensures the existence of the config directory and loads or creates the configuration. + """ + self.home_dir = Path.home() + self.config_dir = self.home_dir / '.brkraw' + self.config_file = self.config_dir / 'config.toml' + self.ensure_config_dir_exists() + self.load_or_create_config() + + def ensure_config_dir_exists(self): + """ + Ensure the existence of the configuration directory. + + Notes: + - Creates the config directory if it does not already exist. + - Also creates 'plugin' and 'bids' directories within the config directory. + """ + if not self.config_dir.exists(): + self.config_dir.mkdir() + (self.config_dir / 'plugin').mkdir() + (self.config_dir / 'bids').mkdir() + + def load_or_create_config(self): + """ + Load an existing configuration file or create a new one if it does not exist. + + Notes: + - If the config file does not exist, a default configuration is created and saved. + - Otherwise, the existing configuration is loaded from the file. 
+ """ + if not self.config_file.exists(): + default_config = { + 'spec': { + 'pvdataset': { + 'binary_files': [], + 'parameter_files': ['subject', 'ResultState', 'AdjStatePerStudy', 'study.MR'] + }, + 'pvscan': { + 'binary_files': ['fid', 'rawdata.job0'], + 'parameter_files': ['method', 'acqp', 'configscan', 'visu_pars', 'AdjStatePerScan'] + }, + 'pvreco': { + 'binary_files': ['2dseq'], + 'parameter_files': ['reco', 'visu_pars', 'procs', 'methreco', 'id'] + } + } + } + with open(self.config_file, 'w') as f: + toml.dump(default_config, f) + self.config = default_config + else: + with open(self.config_file, 'r') as f: + self.config = toml.load(f) + + def set(self, key, value): + """ + Set a key-value pair in the configuration and save the updated configuration to the file. + + Args: + key: The key to set in the configuration. + value: The value to associate with the key. + + Notes: + - Updates the configuration with the provided key-value pair. + - Persists the updated configuration to the config file. + """ + self.config[key] = value + with open(self.config_file, 'w') as f: + toml.dump(self.config, f) + + def get(self, key): + """ + Retrieve the value associated with the given key from the configuration. + + Args: + key: The key to retrieve the value for. + + Returns: + The value associated with the key in the configuration, or None if the key is not found. + + Notes: + - Returns the value corresponding to the provided key from the configuration. + """ + return self.config.get(key) From 3834ce78fd1a3d31015dc710af96f728bde9159d Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Wed, 27 Mar 2024 09:11:43 -0400 Subject: [PATCH 05/38] update documentation of pvobj --- brkraw/api/pvobj/__init__.py | 5 +- brkraw/api/pvobj/dataset.py | 382 +++++++++++++++++++++++++++++------ 2 files changed, 319 insertions(+), 68 deletions(-) diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index cf2f931..887164f 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -1,3 +1,4 @@ -from dataset import PvDataset +from .dataset import PvDataset, PvScan, PvReco +from .parser import Parameter -__all__ = ["PvDataset"] \ No newline at end of file +__all__ = ["PvDataset", "PvScan", "PvReco", "Parameter"] \ No newline at end of file diff --git a/brkraw/api/pvobj/dataset.py b/brkraw/api/pvobj/dataset.py index 819c715..8651a4e 100755 --- a/brkraw/api/pvobj/dataset.py +++ b/brkraw/api/pvobj/dataset.py @@ -2,6 +2,7 @@ import re import zipfile from collections import OrderedDict +from collections import defaultdict try: from .parser import Parameter except ImportError: @@ -9,6 +10,19 @@ from brkraw.api.pvobj.parser import Parameter class BaseMethods: + """ + The `BaseMethods` class provides internal method for PvObjects. + + Explanation: + This class contains various methods for handling files and directories, including fetching directory structure, + fetching zip file contents, opening files as file objects or strings, retrieving values associated with keys, and setting configuration options. + + Args: + **kwargs: Keyword arguments for configuration options. + + Returns: + None + """ _scan_id = None _reco_id = None _path = None @@ -17,6 +31,19 @@ class BaseMethods: _parameter_files = None _binary_files = None + def __init__(self, **kwargs): + """ + Initialize the object. + + Args: + **kwargs: Keyword arguments for configuration options. 
+ + Returns: + None + """ + if kwargs: + self.config(**kwargs) + @staticmethod def _fetch_dir(path): """Searches for directories and files in a given directory and returns the directory structure. @@ -54,23 +81,16 @@ def _fetch_zip(path): - 'file_indexes': A list of file indexes. """ with zipfile.ZipFile(path) as zip_file: - all_paths = {os.path.dirname(item.filename) for item in zip_file.infolist() if item.is_dir()} - contents = OrderedDict({path: {'dirs': set(), 'files': [], 'file_indexes': []} for path in all_paths}) + contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': []}) for i, item in enumerate(zip_file.infolist()): if not item.is_dir(): dirpath, filename = os.path.split(item.filename) contents[dirpath]['files'].append(filename) contents[dirpath]['file_indexes'].append(i) - # Add missing parent directories - parent_path = dirpath - while parent_path != '': - parent_path = os.path.dirname(parent_path) - if parent_path not in contents: - contents[parent_path] = {'dirs': set(), 'files': [], 'file_indexes': []} - for sub_path in all_paths: - parent_path, dirname = os.path.split(sub_path.rstrip('/')) - if parent_path in contents: - contents[parent_path]['dirs'].add(dirname) + while dirpath: + dirpath, dirname = os.path.split(dirpath) + if dirname: + contents[dirpath]['dirs'].add(dirname) return contents def _open_as_fileobject(self, key): @@ -88,30 +108,24 @@ def _open_as_fileobject(self, key): contents = self._contents if 'files' in self._contents else self._contents[list(self._contents.keys())[0]] rootpath = self._rootpath if 'files' in self._contents else self._path files = contents.get('files') + path_list = [*([str(self._scan_id)] if self._scan_id else []), *(['pdata', str(self._reco_id)] if self._reco_id else []), key] if key not in files: - raise ValueError(f'file not exists. [{",".join(files)}]') + if file_indexes := contents.get('file_indexes'): + rel_path = self._path + else: + rel_path = os.path.join(*path_list) + raise ValueError(f'file not exists in "{rel_path}".\n [{", ".join(files)}]') if file_indexes := contents.get('file_indexes'): with zipfile.ZipFile(rootpath) as zf: idx = file_indexes[files.index(key)] return zf.open(zf.namelist()[idx]) else: - path_list = [rootpath, *(str(self._scan_id) if self._scan_id else []), *(['pdata', str(self._reco_id)] if self._reco_id else []), key] + path_list.insert(0, rootpath) path = os.path.join(*path_list) return open(path, 'rb') - def _open_as_binary(self, key): - """Opens a file as binary and reads its contents. - - Args: - key: The key to identify the file. - - Returns: - bytes: The binary contents of the file. - """ - return self._open_as_fileobject(key).read() - def _open_as_string(self, key): """Opens a file as binary, decodes it as UTF-8, and splits it into lines. @@ -121,7 +135,7 @@ def _open_as_string(self, key): Returns: list: The lines of the file as strings. """ - return self._open_as_binary(key).decode('UTF-8').split('\n') + return self._open_as_fileobject(key).read().decode('UTF-8').split('\n') def __getitem__(self, key): """Returns the value associated with the given key. @@ -137,18 +151,73 @@ def __getitem__(self, key): """ if key in self._parameter_files: return Parameter(self._open_as_string(key), name=key, scan_id=self._scan_id, reco_id=self._reco_id) - elif key in self._binary_files: - return self._open_as_binary(key) else: return self._open_as_fileobject(key) + def config(self, **kwargs): + """ + Set the configuration options for the object. 
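
        A usage sketch (argument names are taken from the Args below):
        obj.config(binary_files=['2dseq'], parameter_files=['visu_pars'])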
+ + Args: + **kwargs: Keyword arguments for the configuration options. + binary_files (list): A list of binary file names. + parameter_files (list): A list of parameter file names. + + Returns: + None + """ + if 'binary_files' in kwargs: + self._binary_files = kwargs['binary_files'] + if 'parameter_files' in kwargs: + self._parameter_files = kwargs['parameter_files'] + + + def __dir__(self): + return ['set_config'] + class PvDataset(BaseMethods): - def __init__(self, path): - self._check_dataset_validity(path) - self._construct() - if root_content := [c for c in self._contents.values() if 'subject' in c['files']]: - setattr(self, 'subject', root_content) + """ + A class representing a PvDataset object. + + Inherits from BaseMethods. + + Attributes: + is_compressed (bool): Indicates if the dataset is compressed. + + Methods: + get_scan(scan_id): Get a specific scan object by ID. + get_reco(scan_id, reco_id): Get a specific reco object by scan ID and reco ID. + + Properties: + path (str): The path of the object. + avail (list): A list of available scans. + contents (dict): A dictionary of pvdataset contents. + """ + def __init__(self, path, debug=False, **kwargs): + """ + Initialize the object with the given path and optional debug flag. + + Args: + path: The path to initialize the object with. + debug: A flag indicating whether debug mode is enabled. + **kwargs: Additional keyword arguments. + + Raises: + Any exceptions raised by _check_dataset_validity or _construct methods. + + Notes: + If 'pvdataset' is present in kwargs, it will be used to initialize the object via super(). + + Examples: + obj = ClassName(path='/path/to/dataset', debug=True) + """ + + if 'pvdataset' in kwargs: + super().__init__(**kwargs['pvdataset']) + if not debug: + self._check_dataset_validity(path) + self._construct(**kwargs) # internal method def _check_dataset_validity(self, path): @@ -180,7 +249,7 @@ def _check_dataset_validity(self, path): else: raise ValueError(f"The path '{self._path}' does not meet the required criteria.") - def _construct(self): + def _construct(self, **kwargs): # sourcery skip: low-code-quality """ Constructs the object by organizing the contents. @@ -188,38 +257,74 @@ def _construct(self): It iterates over the sorted contents and updates the `_scans` and `_backup` dictionaries accordingly. After processing, it removes the processed paths from the `_contents` dictionary. + Args: + **kwargs: keyword argument for datatype specification. 
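                For example, pvscan={'parameter_files': ['method', 'acqp']}
                would be forwarded to every child PvScan; the keys consumed
                here are 'pvscan' and 'pvreco' (values shown are a sketch
                based on the defaults in brkraw.config).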
+ Returns: None """ self._scans = OrderedDict() self._backup = OrderedDict() + to_remove = [] - for path, contents in sorted(self._contents.items()): + for path, contents in self._contents.items(): if not path: self._root = contents to_remove.append(path) - else: - if matched := re.match(r'(?:.*/)?(\d+)/pdata/(\d+)$', path) or re.match( - r'(?:.*/)?(\d+)$', path - ): - scan_id = int(matched.group(1)) - if scan_id not in self._scans: - self._scans[scan_id] = PvScan(scan_id, (self.path, path)) - if 'pdata' in contents['dirs']: - self._scans[scan_id].update(contents) - elif len(matched.groups()) == 2: - reco_id = int(matched.group(2)) - self._scans[scan_id].set_reco(path, reco_id, contents) - - to_remove.append(path) - if not contents['files']: - to_remove.append(path) - elif 'subject' not in contents['files'] and path not in to_remove: - self._backup[path] = contents - to_remove.append(path) - - for path in to_remove: - del self._contents[path] + elif not contents['files']: + to_remove.append(path) + elif matched := re.match(r'(?:.*/)?(\d+)/(\D+)/(\d+)$', path) or re.match(r'(?:.*/)?(\d+)$', path): + to_remove.append(self._process_childobj(matched, (path, contents), **kwargs)) + self._clear_contents(to_remove) + + def _process_childobj(self, matched, item, **kwargs): + """ + The `_process_childobj` method processes a child object based on the provided arguments and updates the internal state of the object. + + Args: + matched: A `re.Match` object representing the matched pattern. + item: A tuple containing the path and contents of the child object. + **kwargs: Additional keyword arguments. + + Returns: + str: The path of the processed child object. + + Raises: + None. + + Examples: + # Example usage of _process_childobj + matched = re.match(pattern, input_string) + item = ('path/to/child', {'dirs': set(), 'files': [], 'file_indexes': []}) + result = obj._process_childobj(matched, item, pvscan={'binary_files': [], 'parameter_files': ['method', 'acqp', 'visu_pars']}) + """ + path, contents = item + scan_id = int(matched.group(1)) + if scan_id not in self._scans: + pvscan_kwargs = kwargs.get('pvscan') or {} + self._scans[scan_id] = PvScan(scan_id, (self.path, path), **pvscan_kwargs) + if len(matched.groups()) == 1 and 'pdata' in contents['dirs']: + self._scans[scan_id].update(contents) + elif len(matched.groups()) == 3 and matched.group(2) == 'pdata': + reco_id = int(matched.group(3)) + pvreco_kwargs = kwargs.get('pvreco') or {} + self._scans[scan_id].set_reco(path, reco_id, contents, **pvreco_kwargs) + else: + self._backup[path] = contents + return path + + @property + def contents(self): + for _, contents in self._contents.items(): + if 'subject' in contents['files']: + return contents + + def _clear_contents(self, to_be_removed): + for path in to_be_removed: + try: + del self._contents[path] + except KeyError: + self._dummy.append(path) @property def path(self): @@ -233,49 +338,188 @@ def path(self): @property def avail(self): - return list(self._scans) + """ + A property representing the available scans. + + Returns: + list: A list of available scans. + """ + return sorted(list(self._scans)) def get_scan(self, scan_id): + """ + Get a specific scan object by ID. + + Args: + scan_id (int): The ID of the scan object to retrieve. + + Returns: + object: The specified scan object. + + Raises: + KeyError: If the specified scan ID does not exist. + """ return self._scans[scan_id] def get_reco(self, scan_id, reco_id): + """ + Get a specific reco object by scan ID and reco ID. 
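
        A sketch of the intended call (IDs are illustrative):
        reco = dataset.get_reco(scan_id=1, reco_id=1)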
+ + Args: + scan_id (int): The ID of the scan. + reco_id (int): The ID of the reco object to retrieve. + + Returns: + object: The specified reco object. + + Raises: + KeyError: If the specified scan ID or reco ID does not exist. + """ return self.get_scan(scan_id).get_reco(reco_id) def __dir__(self): - return ['path', 'avail', 'get_scan', 'get_reco'] + return super().__dir__() + ['path', 'avail', 'get_scan', 'get_reco'] class PvScan(BaseMethods): - def __init__(self, scan_id, pathes, contents=None, recos=None): + """ + A class representing a PvScan object. + + Inherits from BaseMethods. + + Methods: + update(contents): Update the contents of the dataset. + set_reco(path, reco_id, contents): Set a reco object with the specified path, ID, and contents. + get_reco(reco_id): Get a specific reco object by ID. + + Properties: + path (str): The path. + avail (list): A list of available items. + """ + def __init__(self, scan_id, pathes, contents=None, recos=None, **kwargs): + """ + Initialize a Dataset object. + + Args: + scan_id (int): The ID of the scan. + pathes (tuple): A tuple containing the root path and the path. + contents (list, optional): The initial contents of the dataset. Defaults to None. + recos (dict, optional): A dictionary of reco objects. Defaults to None. + + Attributes: + _scan_id (int): The ID of the scan. + _rootpath (str): The root path. + _path (str): The path. + _recos (OrderedDict): An ordered dictionary of reco objects. + + Methods: + update(contents): Update the contents of the dataset. + """ + super().__init__(**kwargs) self._scan_id = scan_id self._rootpath, self._path = pathes self.update(contents) self._recos = OrderedDict(recos) if recos else OrderedDict() def update(self, contents): + """ + Update the contents of the dataset. + + Args: + contents (list): The new contents of the dataset. + + Returns: + None + """ self._contents = contents - def set_reco(self, path, reco_id, contents): - self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents) + def set_reco(self, path, reco_id, contents, **kwargs): + """ + Set a reco object with the specified path, ID, and contents. + + Args: + path (str): The path of the reco object. + reco_id (int): The ID of the reco object. + contents (list): The contents of the reco object. + + Returns: + None + """ + self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents, **kwargs) def get_reco(self, reco_id): + """ + Get a specific reco object by ID. + + Args: + reco_id (int): The ID of the reco object to retrieve. + + Returns: + object: The specified reco object. + + Raises: + KeyError: If the specified reco ID does not exist. + """ return self._recos[reco_id] @property def path(self): + """ + A property representing the path. + + Returns: + str: The path. + """ return self._path @property def avail(self): - return list(self._recos) + """ + A property representing the available items. + + Returns: + list: A list of available items. + """ + return sorted(list(self._recos)) def __dir__(self): - return ['path', 'avail', 'get_reco'] + return super().__dir__() + ['path', 'avail', 'get_reco'] class PvReco(BaseMethods): - def __init__(self, scan_id, reco_id, pathes, contents): + """ + A class representing a PvReco object. + + Inherits from BaseMethods. + + Args: + scan_id (int): The ID of the scan. + reco_id (int): The ID of the reconstruction. + pathes (tuple): A tuple containing the root path and the path. + contents (list): A list of contents. 
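            In practice this is the dictionary produced by the directory/zip
            fetchers, with 'dirs', 'files' and 'file_indexes' entries.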
+ + Properties: + path (str): The path. + """ + def __init__(self, scan_id, reco_id, pathes, contents, **kwargs): + """ + Initialize a Dataset object. + + Args: + scan_id (int): The ID of the scan. + reco_id (int): The ID of the reconstruction. + pathes (tuple): A tuple containing the root path and the path. + contents (list): A list of contents. + + Attributes: + _scan_id (int): The ID of the scan. + _reco_id (int): The ID of the reconstruction. + _rootpath (str): The root path. + _path (str): The path. + _contents (list): The list of contents. + """ + super().__init__(**kwargs) self._scan_id = scan_id self._reco_id = reco_id self._rootpath, self._path = pathes @@ -283,7 +527,13 @@ def __init__(self, scan_id, reco_id, pathes, contents): @property def path(self): + """ + A property representing the path. + + Returns: + str: The path. + """ return self._path def __dir__(self): - return ['path'] \ No newline at end of file + return super().__dir__() + ['path'] \ No newline at end of file From 3f422d5f579332e85dc58b7b7b6503c4514300fa Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Wed, 27 Mar 2024 09:13:28 -0400 Subject: [PATCH 06/38] create loader module --- brkraw/api/__init__.py | 8 ++------ brkraw/api/loader.py | 6 ++++++ 2 files changed, 8 insertions(+), 6 deletions(-) create mode 100644 brkraw/api/loader.py diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index cabd18a..b669a00 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,7 +1,3 @@ -import pvobj +from .loader import BrukerLoader -__all__ = ['pvobj', 'BrukerLoader'] - -class BrukerLoader: - def __init__(self, path): - self._pvobj = pvobj.PvDataset(path) +__all__ = ['BrukerLoader'] diff --git a/brkraw/api/loader.py b/brkraw/api/loader.py new file mode 100644 index 0000000..a36741c --- /dev/null +++ b/brkraw/api/loader.py @@ -0,0 +1,6 @@ +from .pvobj import PvDataset +from ..config import ConfigManager + +class BrukerLoader: + def __init__(self, path): + self._pvobj = PvDataset(path, **ConfigManager().get('spec')) From 425ca1ee74b131391d9acba8471f76389412f379 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Wed, 27 Mar 2024 09:14:06 -0400 Subject: [PATCH 07/38] update gitignore for debugging --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 4cb2e2b..dbf4294 100644 --- a/.gitignore +++ b/.gitignore @@ -13,4 +13,5 @@ build .DS_Store tests/tutorials _test.py -_*.ipynb \ No newline at end of file +_*.ipynb +_*.log \ No newline at end of file From 05a3a8541810cdb9c88b669c49bb2e9500eb8b1d Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Wed, 27 Mar 2024 09:17:01 -0400 Subject: [PATCH 08/38] make config module accessible from package --- brkraw/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/brkraw/__init__.py b/brkraw/__init__.py index ac274a9..130a9c2 100644 --- a/brkraw/__init__.py +++ b/brkraw/__init__.py @@ -1,7 +1,7 @@ from .lib import * __version__ = '0.3.11' -__all__ = ['BrukerLoader', '__version__'] +__all__ = ['BrukerLoader', '__version__', 'config'] def load(path): From cb4933e49411bd07181f20701f88f80c2108784a Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 11 Apr 2024 20:59:52 -0400 Subject: [PATCH 09/38] [update] PvObjects, Parameter, and Parser --- brkraw/api/pvobj/dataset.py | 57 +++++++++++++++++++++++++------------ brkraw/api/pvobj/parser.py | 32 ++++++++++++--------- 2 files changed, 58 insertions(+), 31 deletions(-) diff --git a/brkraw/api/pvobj/dataset.py b/brkraw/api/pvobj/dataset.py index 
8651a4e..ebf5357 100755 --- a/brkraw/api/pvobj/dataset.py +++ b/brkraw/api/pvobj/dataset.py @@ -153,6 +153,29 @@ def __getitem__(self, key): return Parameter(self._open_as_string(key), name=key, scan_id=self._scan_id, reco_id=self._reco_id) else: return self._open_as_fileobject(key) + + def __getattr__(self, key): + """ + Get attribute by name. + + Args: + key (str): The name of the attribute to retrieve. + + Returns: + Parameter or file object: The parameter object if the key is found in parameter files, otherwise the file object. + + Examples: + obj = Dataset() + param = obj.some_key # Returns a Parameter object or file object. + """ + if any(param_key == key or param_key.replace('.', '_') == key for param_key in self._parameter_files): + return Parameter(self._open_as_string(key), name=key, scan_id=self._scan_id, reco_id=self._reco_id) + elif any(binary_key == key or binary_key.replace('.', '_') == key for binary_key in self._binary_files): + return self._open_as_fileobject(key) + elif any(file_key == key or file_key.replace('.', '_') == key for file_key in self._contents['files']): + return self._open_as_fileobject(key) + else: + raise AttributeError def config(self, **kwargs): """ @@ -361,24 +384,8 @@ def get_scan(self, scan_id): """ return self._scans[scan_id] - def get_reco(self, scan_id, reco_id): - """ - Get a specific reco object by scan ID and reco ID. - - Args: - scan_id (int): The ID of the scan. - reco_id (int): The ID of the reco object to retrieve. - - Returns: - object: The specified reco object. - - Raises: - KeyError: If the specified scan ID or reco ID does not exist. - """ - return self.get_scan(scan_id).get_reco(reco_id) - def __dir__(self): - return super().__dir__() + ['path', 'avail', 'get_scan', 'get_reco'] + return super().__dir__() + ['path', 'avail', 'get_scan'] @@ -462,6 +469,20 @@ def get_reco(self, reco_id): KeyError: If the specified reco ID does not exist. """ return self._recos[reco_id] + + def get_visu_pars(self, reco_id=None): + if reco_id: + return getattr(self.get_reco(reco_id), 'visu_pars') + elif 'visu_pars' in self._contents['files']: + return getattr(self, 'visu_pars') + elif len(self.avail): + recoobj = self.get_reco(self.avail[0]) + if 'visu_pars' not in recoobj._contents['files']: + raise FileNotFoundError + else: + return getattr(recoobj, 'visu_pars') + else: + raise FileNotFoundError @property def path(self): @@ -484,7 +505,7 @@ def avail(self): return sorted(list(self._recos)) def __dir__(self): - return super().__dir__() + ['path', 'avail', 'get_reco'] + return super().__dir__() + ['path', 'avail', 'get_reco', 'get_visu_pars'] class PvReco(BaseMethods): diff --git a/brkraw/api/pvobj/parser.py b/brkraw/api/pvobj/parser.py index 59d9994..901269f 100755 --- a/brkraw/api/pvobj/parser.py +++ b/brkraw/api/pvobj/parser.py @@ -120,7 +120,7 @@ def clean_up_elements_in_array(data): return data @staticmethod - def process_bisarray(elements): + def process_bisarray(elements, shape): """Determines the case of an array with BIS prefix by converting each element to a specific data type. Args: @@ -130,7 +130,10 @@ def process_bisarray(elements): float, int, or list: The converted elements of the bisarray. If there is only one element, it is returned as is, otherwise a list of converted elements is returned. 
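
            If shape is a list whose first entry equals the number of
            elements, each element is additionally split on commas (this is
            how the shape argument is used).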
""" elements = [Parser.convert_string_to(c) for c in elements] - return elements.pop() if len(elements) == 1 else elements + elements = elements.pop() if len(elements) == 1 else elements + if isinstance(shape, list) and shape[0] == len(elements): + elements = [e.split(',') for e in elements] + return elements @staticmethod def process_complexarray(data): @@ -182,18 +185,19 @@ def process_string(data, shape): >>> process_string(data, shape) 'Hello, World!' """ + shape = Parser.parse_shape(shape) if elements := re.findall(ptrn_bisstring, data): - return Parser.process_bisarray(elements) - - data = Parser.clean_up_elements_in_array(data) + data = Parser.process_bisarray(elements, shape) + return data, -1 + else: + data = Parser.clean_up_elements_in_array(data) if re.match(ptrn_complex_array, data): - return Parser.process_complexarray(data) + data = Parser.process_complexarray(data) elif re.match(ptrn_string, data): - return re.sub(ptrn_string, r'\g', data) - - shape = Parser.parse_shape(shape) - data = Parser.parse_data(data) - return data + data = re.sub(ptrn_string, r'\g', data) + else: + data = Parser.parse_data(data) + return data, shape @staticmethod def parse_shape(shape): @@ -302,7 +306,7 @@ def convert_data_to(data, shape): This method is intended to be called internally within the class and does not have direct usage examples. """ if isinstance(data, str): - data = Parser.process_string(data, shape) + data, shape = Parser.process_string(data, shape) if isinstance(data, list): if ( isinstance(shape, list) @@ -438,7 +442,6 @@ def _set_param(self, params, param_addr, contents): for index, addr in enumerate(param_addr[:-1]): dtype, key, value = params[addr] data, shape = self._process_contents(contents, addr, addr_diff, index, value) - if dtype is PARAMETER: self._parameters[key] = Parser.convert_data_to(data, shape) elif dtype is HEADER: @@ -449,6 +452,9 @@ def _set_param(self, params, param_addr, contents): def __getitem__(self, key): return self.parameters[key] + def __getattr__(self, key): + return self.parameters[key] + def __repr__(self): return f"{self.name}({', '.join(self._repr_items)})" From d6ce0e2eda32f8bc9fd56d6e7efe9e67c61f8fc3 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 11 Apr 2024 21:02:13 -0400 Subject: [PATCH 10/38] [added] New Loader, ScanObj, ScanAnalyzer, and Helpers --- brkraw/api/analyzer.py | 230 ++++++++++++++++++ brkraw/api/helper.py | 531 +++++++++++++++++++++++++++++++++++++++++ brkraw/api/loader.py | 108 ++++++++- 3 files changed, 866 insertions(+), 3 deletions(-) create mode 100644 brkraw/api/analyzer.py create mode 100644 brkraw/api/helper.py diff --git a/brkraw/api/analyzer.py b/brkraw/api/analyzer.py new file mode 100644 index 0000000..83372dc --- /dev/null +++ b/brkraw/api/analyzer.py @@ -0,0 +1,230 @@ +from __future__ import annotations +import re +from brkraw.api import helper +import numpy as np +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from .pvobj import PvScan + from .loader import ScanInfo + from io import BufferedReader + from zipfile import ZipExtFile + +SLICEORIENT = { + 0: 'sagital', + 1: 'coronal', + 2: 'axial' + } + +SUBJTYPE = ['Biped', 'Quadruped', 'Phantom', 'Other', 'OtherAnimal'] +SUBJPOSE = { + 'part': ['Head', 'Foot', 'Tail'], + 'side': ['Supine', 'Prone', 'Left', 'Right'] +} + +class Pars: + def __init__(self): + pass + +class ScanInfoAnalyzer: + """Helps parse metadata from multiple parameter files to make it more human-readable. 
+ + Args: + pvscan (PvScan): The PvScan object containing acquisition and method parameters. + reco_id (int, optional): The reconstruction ID. Defaults to None. + + Raises: + NotImplementedError: If an operation is not implemented. + """ + def __init__(self, pvscan: 'PvScan', reco_id:int|None = None): + self.pars = self.get_pars(pvscan, reco_id) + self.info_protocol = helper.Protocol(self).get_info() + if self.pars.visu: + self._set_attrs() + self.info_dataarray = helper.DataArray(self).get_info() + self.info_frame_group = helper.FrameGroup(self).get_info() + self.info_image = helper.Image(self).get_info() + self.info_slicepack = helper.SlicePack(self).get_info() + self.info_cycle = helper.Cycle(self).get_info() + if self.info_image['dim'] > 1: + self.info_orientation = helper.Orientation(self).get_info() + + @staticmethod + def get_pars(pvscan: 'PvScan', reco_id: int|None): + pars = Pars() + for p in ['acqp', 'method']: + vals = getattr(pvscan, p) + setattr(pars, p, vals) + try: + visu = pvscan.get_visu_pars(reco_id) + except FileNotFoundError: + visu = None + setattr(pars, 'visu', visu) + return pars + + def _set_attrs(self): + """ + Parse parameters and set attributes from acqp, method, and visu_pars files. + This function parses parameters from different objects (acqp, method, visu_pars) and sets corresponding attributes in the instance. + Only attributes with prefixes 'Visu', 'PVM_', 'ACQ_' are set as object attributes in snake case to follow Pythonic naming conventions. + + Args: + pvscan: The pvscan parameter. + reco_id: The reco_id parameter. + """ + for prefix, pars_obj in {'Visu': self.pars.visu, + 'PVM_': self.pars.method, + 'ACQ_': self.pars.acqp}.items(): + for key in pars_obj.keys(): + if prefix in key: + attr = self._camel_to_snake_case(key.replace(prefix, '')) + value = getattr(pars_obj, key) + attr = f'{prefix.lower()}{attr}' if '_' in prefix else f'{prefix.lower()}_{attr}' + setattr(self, attr, value) + + @staticmethod + def _camel_to_snake_case(input_string: str): + matches = re.finditer(r'[A-Z]+[^A-Z]*', input_string) + output_string = [] + for m in matches: + string = m.group() + is_upper = [bool(char.isupper()) for char in string] + if sum(is_upper) > 1 and not all(is_upper): + idx_for_space = is_upper.index(False) + output_string.append(f'{string[:idx_for_space-1]}_{string[idx_for_space-1:]}'.lower()) + else: + output_string.append(string.lower()) + return '_'.join(output_string) + + def __dir__(self): + return [attr for attr in self.__dict__.keys() if 'info_' in attr] + + +class AffineAnalyzer: + def __init__(self, infoobj: 'ScanInfo'): + if infoobj.image['dim'] == 2: + xr, yr = infoobj.image['resolution'] + self.resolution = [(xr, yr, zr) for zr in infoobj.slicepack['slice_distances_each_pack']] + elif self.info.image['dim'] == 3: + self.resolution = infoobj.image['resolution'] + else: + raise NotImplementedError + if infoobj.slicepack['num_slice_packs'] > 1: + self.affine = [ + self._calculate_affine(infoobj, slicepack_id) + for slicepack_id in range(infoobj.slicepack['num_slice_packs']) + ] + else: + self.affine = self._calculate_affine(infoobj) + + self.subj_type = infoobj.orientation['subject_type'] if hasattr(infoobj, 'orientation') else None + self.subj_position = infoobj.orientation['subject_position'] if hasattr(infoobj, 'orientation') else None + + def get_affine(self, subj_type:str|None=None, subj_position:str|None=None): + subj_type = subj_type or self.subj_type + subj_position = subj_position or self.subj_position + if isinstance(self.affine, list): + 
affine = [self._correct_orientation(aff, subj_position, subj_type) for aff in self.affine] + elif isinstance(self.affine, np.ndarray): + affine = self._correct_orientation(self.affine, subj_position, subj_type) + return affine + + def _calculate_affine(self, infoobj: 'ScanInfo', slicepack_id:int|None = None): + sidx = infoobj.orientation['orientation_desc'][slicepack_id].index(2) \ + if slicepack_id else infoobj.orientation['orientation_desc'].index(2) + slice_orient = SLICEORIENT[sidx] + resol = self.resolution[slicepack_id] \ + if slicepack_id else self.resolution[0] + orientation = infoobj.orientation['orientation'][slicepack_id] \ + if slicepack_id else infoobj.orientation['orientation'] + volume_origin = infoobj.orientation['volume_origin'][slicepack_id] \ + if slicepack_id else infoobj.orientation['volume_origin'] + if infoobj.slicepack['reverse_slice_order']: + slice_distance = infoobj.slicepack['slice_distances_each_pack'][slicepack_id] \ + if slicepack_id else infoobj.slicepack['slice_distances_each_pack'] + volume_origin = self._correct_origin(orientation, volume_origin, slice_distance) + return self._compose_affine(resol, orientation, volume_origin, slice_orient) + + @staticmethod + def _correct_origin(orientation, volume_origin, slice_distance): + new_origin = orientation.dot(volume_origin) + new_origin[-1] += slice_distance + return orientation.T.dot(new_origin) + + @staticmethod + def _compose_affine(resolution, orientation, volume_origin, slice_orient): + resol = np.array(resolution) + if slice_orient in ['axial', 'sagital']: + resol = np.diag(resol) + else: + resol = np.diag(resol * np.array([1, 1, -1])) + + rmat = orientation.T.dot(resol) + return helper.from_matvec(rmat, volume_origin) + + @staticmethod + def _est_rotate_angle(subj_pose): + rotate_angle = {'rad_x':0, 'rad_y':0, 'rad_z':0} + if subj_pose: + if subj_pose == 'Head_Supine': + rotate_angle['rad_z'] = np.pi + elif subj_pose == 'Head_Prone': + pass + elif subj_pose == 'Head_Left': + rotate_angle['rad_z'] = np.pi/2 + elif subj_pose == 'Head_Right': + rotate_angle['rad_z'] = -np.pi/2 + elif subj_pose in ['Foot_Supine', 'Tail_Supine']: + rotate_angle['rad_x'] = np.pi + elif subj_pose in ['Foot_Prone', 'Tail_Prone']: + rotate_angle['rad_y'] = np.pi + elif subj_pose in ['Foot_Left', 'Tail_Left']: + rotate_angle['rad_y'] = np.pi + rotate_angle['rad_z'] = -np.pi/2 + elif subj_pose in ['Foot_Right', 'Tail_Right']: + rotate_angle['rad_y'] = np.pi + rotate_angle['rad_z'] = np.pi/2 + else: + raise NotImplementedError + return rotate_angle + + @classmethod + def _correct_orientation(cls, affine, subj_pose, subj_type): + cls._inspect_subj_info(subj_pose, subj_type) + rotate_angle = cls._est_rotate_angle(subj_pose) + affine = helper.rotate_affine(affine, **rotate_angle) + + if subj_type != 'Biped': + affine = helper.rotate_affine(affine, rad_x=-np.pi/2, rad_y=np.pi) + return affine + + @staticmethod + def _inspect_subj_info(subj_pose, subj_type): + part, side = subj_pose.split('_') + assert part in SUBJPOSE['part'], 'Invalid subject position' + assert side in SUBJPOSE['side'], 'Invalid subject position' + assert subj_type in SUBJTYPE, 'Invalid subject type' + + +class DataArrayAnalyzer: + def __init__(self, infoobj: 'ScanInfo', fileobj: BufferedReader|ZipExtFile): + self._parse_info(infoobj) + self.buffer = fileobj + + def _parse_info(self, infoobj: 'ScanInfo'): + if not hasattr(infoobj, 'dataarray'): + raise AttributeError + self.slope = infoobj.dataarray['2dseq_slope'] + self.offset = infoobj.dataarray['2dseq_offset'] + 
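        # slope/offset/dtype mirror helper.DataArray.get_info() output and
        # describe how to decode (and rescale) the raw 2dseq buffer.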
self.dtype = infoobj.dataarray['2dseq_dtype'] + self.shape = infoobj.image['shape'] + self.shape_desc = infoobj.image['dim_desc'] + if infoobj.frame_group and infoobj.frame_group['type']: + self._calc_array_shape(infoobj) + + def _calc_array_shape(self, infoobj: 'ScanInfo'): + self.shape.extend(infoobj.frame_group['shape']) + self.shape_desc.extend([fgid.replace('FG_', '').lower() for fgid in infoobj.frame_group['id']]) + + def get_dataarray(self): + self.buffer.seek(0) + return np.frombuffer(self.buffer.read(), self.dtype).reshape(self.shape, order='F') diff --git a/brkraw/api/helper.py b/brkraw/api/helper.py new file mode 100644 index 0000000..b275755 --- /dev/null +++ b/brkraw/api/helper.py @@ -0,0 +1,531 @@ +from __future__ import annotations +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from .analyzer import ScanInfoAnalyzer, ScanInfo +import re +import math +import warnings +import contextlib +import numpy as np +from functools import partial, reduce + +WORDTYPE = \ + dict(_32BIT_SGN_INT = 'i', + _16BIT_SGN_INT = 'h', + _8BIT_UNSGN_INT = 'B', + _32BIT_FLOAT = 'f') +BYTEORDER = \ + dict(littleEndian = '<', + bigEndian = '>') + +def is_all_element_same(listobj): + if listobj is None: + return True + else: + return all(map(partial(lambda x, y: x == y, y=listobj[0]), listobj)) + +def from_matvec(mat, vec): + """Create an affine transformation matrix from a matrix and a vector.""" + if mat.shape == (3, 3) and vec.shape == (3,): + affine = np.eye(4) + affine[:3, :3] = mat + affine[:3, 3] = vec + return affine + else: + raise ValueError("Matrix must be 3x3 and vector must be 1x3") + +def to_matvec(affine): + """ + Decompose a 4x4 affine matrix into a 3x3 matrix and a 1x3 vector. + + Parameters: + affine (numpy.ndarray): A 4x4 affine transformation matrix. + + Returns: + tuple: A 3x3 matrix and a 1x3 vector. 
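+
+ Example (a minimal doctest-style sketch, using this module's numpy import):
+
+ >>> mat, vec = to_matvec(np.eye(4))
+ >>> mat.shape, vec.shape
+ ((3, 3), (3,))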
+ """ + if affine.shape != (4, 4): + raise ValueError("Affine matrix must be 4x4") + mat = affine[:3, :3] + vec = affine[:3, 3] + return mat, vec + +def rotate_affine(affine, rad_x=0, rad_y=0, rad_z=0): + ''' axis = x or y or z ''' + rmat = dict(x = np.array([[1, 0, 0], + [0, np.cos(rad_x), -np.sin(rad_x)], + [0, np.sin(rad_x), np.cos(rad_x)]]).astype('float'), + y = np.array([[np.cos(rad_y), 0, np.sin(rad_y)], + [0, 1, 0], + [-np.sin(rad_y), 0, np.cos(rad_y)]]).astype('float'), + z = np.array([[np.cos(rad_z), -np.sin(rad_z), 0], + [np.sin(rad_z), np.cos(rad_z), 0], + [0, 0, 1]]).astype('float')) + af_mat, af_vec = to_matvec(affine) + rotated_mat = rmat['z'].dot(rmat['y'].dot(rmat['x'].dot(af_mat))) + rotated_vec = rmat['z'].dot(rmat['y'].dot(rmat['x'].dot(af_vec))) + return from_matvec(rotated_mat, rotated_vec) + +class BaseHelper: + def __init__(self): + self.warns = [] + + def _warn(self, message): + warnings.warn(message, UserWarning) + self.warns.append(message) + + +class Protocol(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + acqp = analobj.pars.acqp + method = analobj.pars.method + visu = analobj.pars.visu + + self.pv_version = str(visu['VisuCreatorVersion']) if visu else None + self.pulse_program = acqp['PULPROG'] + self.scan_name = acqp['ACQ_scan_name'] + self.scan_method = method['Method'] + if visu is None: + self._warn("visu_pars not found") + + def get_info(self): + return { + 'pv_version': self.pv_version, + 'pulse_program': self.pulse_program, + 'scan_name': self.scan_name, + 'scan_method': self.scan_method, + 'warns': [] + } + + +class DataArray(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + fid_word_type = f'_{"".join(analobj.pars.acqp["ACQ_word_size"].split("_"))}_SGN_INT' + fid_byte_order = f'{analobj.pars.acqp["BYTORDA"]}Endian' + self.fid_dtype = np.dtype(f'{BYTEORDER[fid_byte_order]}{WORDTYPE[fid_word_type]}') + + byte_order = getattr(analobj, 'visu_core_byte_order') + word_type = getattr(analobj, 'visu_core_word_type') + self.data_dtype = np.dtype(f'{BYTEORDER[byte_order]}{WORDTYPE[word_type]}') + data_slope = getattr(analobj, 'visu_core_data_slope') + data_offset = getattr(analobj, 'visu_core_data_offs') + self.data_slope = data_slope[0] \ + if isinstance(data_slope, list) and is_all_element_same(data_slope) else data_slope + self.data_offset = data_offset[0] \ + if isinstance(data_offset, list) and is_all_element_same(data_offset) else data_offset + + def get_info(self): + return { + 'fid_dtype': self.fid_dtype, + '2dseq_dtype': self.data_dtype, + '2dseq_slope': self.data_slope, + '2dseq_offset': self.data_offset, + 'warns': self.warns + } + + +class SlicePack(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + fg_info = analobj.info_frame_group if hasattr(analobj, 'info_frame_group') else FrameGroup(analobj).get_info() + img_info = analobj.info_image if hasattr(analobj, 'info_image') else Image(analobj).get_info() + if fg_info is None or fg_info['type'] is None: + num_slice_packs = 1 + num_slices_each_pack = [getattr(analobj, 'visu_core_frame_count')] + slice_distances_each_pack = [getattr(analobj, 'visu_core_frame_thickness')] \ + if img_info['dim'] > 1 else [] + else: + if analobj.visu_version == 1: + parser = self._parse_legacy + else: + parser = self._parse_6to360 + + num_slice_packs, num_slices_each_pack, slice_distances_each_pack = parser(analobj, fg_info) + if len(slice_distances_each_pack): + for i, d in enumerate(slice_distances_each_pack): 
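+ # a recorded distance of 0 falls back to the frame thickness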
+
+ if d == 0:
+ slice_distances_each_pack[i] = getattr(analobj, 'visu_core_frame_thickness')
+ if not len(num_slices_each_pack):
+ num_slices_each_pack = [1]
+
+ self.num_slice_packs = num_slice_packs
+ self.num_slices_each_pack = num_slices_each_pack
+ self.slice_distances_each_pack = slice_distances_each_pack
+
+ disk_slice_order = getattr(analobj, 'visu_core_disk_slice_order') if hasattr(analobj, 'visu_core_disk_slice_order') else 'normal'
+ self.is_reverse = 'reverse' in disk_slice_order
+ if analobj.visu_version not in (1, 3, 4, 5):
+ self._warn(f'Parameters for the current Visu version have not been tested: v{analobj.visu_version}')
+
+ def _parse_legacy(self, analobj, fg_info):
+ """
+ Parses the slice description for legacy cases (PV versions < 6).
+ This function calculates the number of slice packs, the number of slices in each pack,
+ and the slice distances for legacy cases.
+ """
+ num_slice_packs = 1
+ with contextlib.suppress(AttributeError):
+ phase_enc_dir = getattr(analobj, 'visu_acq_image_phase_enc_dir')
+ phase_enc_dir = [phase_enc_dir[0]] if is_all_element_same(phase_enc_dir) else phase_enc_dir
+ num_slice_packs = len(phase_enc_dir)
+
+ shape = fg_info['shape']
+ num_slices_each_pack = []
+ with contextlib.suppress(ValueError):
+ slice_fid = fg_info['id'].index('FG_SLICE')
+ if num_slice_packs > 1:
+ num_slices_each_pack = [int(shape[slice_fid]/num_slice_packs) for _ in range(num_slice_packs)]
+ else:
+ num_slices_each_pack = [shape[slice_fid]]
+
+ slice_fg = [fg for fg in fg_info['id'] if 'slice' in fg.lower()]
+ if len(slice_fg):
+ if num_slice_packs > 1:
+ num_slices_each_pack.extend(
+ int(shape[0] / num_slice_packs)
+ for _ in range(num_slice_packs)
+ )
+ else:
+ num_slices_each_pack.append(shape[0])
+ slice_distances_each_pack = [getattr(analobj, 'visu_core_frame_thickness') for _ in range(num_slice_packs)]
+ return num_slice_packs, num_slices_each_pack, slice_distances_each_pack
+
+ def _parse_6to360(self, analobj, fg_info):
+ """
+ Parses the slice description for PV versions 6 through 360.
+ This function calculates the number of slice packs, the number of slices in each pack,
+ and the slice distances for these versions.
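+ Slice-pack counts are read from VisuCoreSlicePacksDef and slice distances
+ from VisuCoreSlicePacksSliceDist, when available.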
+ """ + if hasattr(analobj, 'visu_core_slice_packs_def'): + num_slice_packs = getattr(analobj, 'visu_core_slice_packs_def')[0][1] + else: + num_slice_packs = 1 + slices_desc_in_pack = getattr(analobj, 'visu_core_slice_packs_slices') \ + if hasattr(analobj, 'visu_core_slice_packs_slices') else [] + slice_distance = getattr(analobj, 'visu_core_slice_packs_slice_dist') \ + if hasattr(analobj, 'visu_core_slice_packs_slice_dist') else [] + + slice_fg = [fg for fg in fg_info['id'] if 'slice' in fg.lower()] + if len(slice_fg): + if len(slices_desc_in_pack): + num_slices_each_pack = [slices_desc_in_pack[0][1] for _ in range(num_slice_packs)] + else: + num_slices_each_pack = [1] + if isinstance(slice_distance, list): + slice_distances_each_pack = [slice_distance[0] for _ in range(num_slice_packs)] + elif isinstance(slice_distance, (int, float)): + slice_distances_each_pack = [slice_distance for _ in range(num_slice_packs)] + else: + self._warn("Not supported data type for Slice Distance") + else: + num_slices_each_pack = [1] + slice_distances_each_pack = [getattr(analobj, 'visu_core_frame_thickness')] + return num_slice_packs, num_slices_each_pack, slice_distances_each_pack + + def get_info(self): + return { + 'num_slice_packs': self.num_slice_packs, + 'num_slices_each_pack': self.num_slices_each_pack, + 'slice_distances_each_pack': self.slice_distances_each_pack, + 'slice_distance_unit': 'mm', + 'reverse_slice_order': self.is_reverse, + 'warns': self.warns + } + + +class FrameGroup(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + if hasattr(analobj, 'visu_fg_order_desc_dim'): + self.exists = True + self.type = getattr(analobj, 'visu_core_frame_type') \ + if hasattr(analobj, 'visu_core_frame_type') else None + self.shape = [] + self.id = [] + self.comment = [] + self.dependent_vals = [] + for (shape, fgid, comment, vals_start, vals_cnt) in getattr(analobj, 'visu_fg_order_desc'): + self.shape.append(shape) + self.id.append(fgid) + self.comment.append(comment) + self.dependent_vals.append([ + getattr(analobj, 'visu_group_dep_vals')[vals_start + count] + for count in range(vals_cnt) + ] if vals_cnt else []) + self.size = reduce(lambda x, y: x * y, self.shape) + else: + self.exists = False + self._warn('frame group information') + + def get_info(self): + if not self.exists: + return None + return { + 'type': self.type, + 'size': self.size, + 'shape': self.shape, + 'id': self.id, + 'comment': self.comment, + 'dependent_vals': self.dependent_vals, + 'warns': self.warns + } + + +class Cycle(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + scan_time = getattr(analobj, 'visu_acq_scan_time') or 0 + fg_info = analobj.info_frame_group if hasattr(analobj, 'info_frame_group') else FrameGroup(analobj).get_info() + fg_not_slice = [] + if fg_info != None and fg_info['type'] != None: + fg_not_slice.extend([fg_info['shape'][id] for id, fg in enumerate(fg_info['id']) + if not re.search('slice', fg, re.IGNORECASE)]) + self.num_frames = np.prod(fg_not_slice) if len(fg_not_slice) else 1 + self.time_step = (scan_time / self.num_frames) + + def get_info(self): + return { + "num_frames": self.num_frames, + "time_step": self.time_step, + "unit": 'msec', + 'warns': self.warns + } + + +class Image(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + self.dim = getattr(analobj, 'visu_core_dim') + self.dim_desc = getattr(analobj, 'visu_core_dim_desc') + fov = getattr(analobj, 'visu_core_extent') if hasattr(analobj, 
'visu_core_extent') else None + shape = getattr(analobj, 'visu_core_size') if hasattr(analobj, 'visu_core_size') else None + self.resolusion = np.divide(fov, shape).tolist() if (fov and shape) else None + self.field_of_view = fov + self.shape = shape + + if self.dim > 3: + self._warn('Image dimension larger than 3') + message = lambda x: f'image contains {x} dimension' + if isinstance(self.dim_desc, list): + for d in self.dim_desc: + if d != 'spatial': + self._warn(message(d)) + elif isinstance(self.dim_desc, str): + if self.dim_desc != 'spatial': + self._warn(message(self.dim_desc)) + + def get_info(self): + return { + 'dim': self.dim, + 'dim_desc': self.dim_desc, + 'shape': self.shape, + 'resolution': self.resolusion, + 'field_of_view': self.field_of_view, + 'unit': 'mm', + 'warns': self.warns + } + + +class Orientation(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + info_slicepack = analobj.info_slicepack if hasattr(analobj, 'info_slicepack') else SlicePack(analobj).get_info() + self.subject_type = getattr(analobj, 'visu_subject_type') \ + if hasattr(analobj, 'visu_subject_type') else None + self.subject_position = getattr(analobj, 'visu_subject_position') \ + if hasattr(analobj, 'visu_subject_position') else None + self._orient = getattr(analobj, 'visu_core_orientation').tolist() + self._position = getattr(analobj, 'visu_core_position') + self.gradient_orient = getattr(analobj, 'pvm_s_pack_arr_grad_orient') + self.num_slice_packs = info_slicepack['num_slice_packs'] + self.gradient_encoding_dir = self._get_gradient_encoding_dir(analobj) + self.orientation = [] + self.orientation_desc = [] + self.volume_origin = [] + + if self.num_slice_packs > 1: + self._case_multi_slicepacks() + else: + self._case_single_slicepack() + + def get_info(self): + return { + 'subject_type': self.subject_type, + 'subject_position': self.subject_position, + 'volume_origin': self.volume_origin, + 'orientation': self.orientation, + 'orientation_desc': self.orientation_desc, + 'gradient_orient': self.gradient_orient + } + + def _case_multi_slicepacks(self): + if len(self._orient) != self.num_slice_packs: + self._case_multi_slicepacks_multi_slices() + self.is_msp_ms = True + else: + self.is_msp_ms = False + + for id, ori in enumerate(self._orient): + rs_ori = np.array(ori).reshape((3,3)) + self.orientation.append(rs_ori) + self.orientation_desc.append(self._get_orient_axis(rs_ori)) + if self.is_msp_ms: + self.volume_origin.append(self._est_volume_origin(id)) + else: + self.volume_origin.append(self._position[id]) + + def _case_single_slicepack(self): + if is_all_element_same(self._orient): + self.orientation = np.array(self._orient[0]).reshape((3,3)) + self.orientation_desc = self._get_orient_axis(self.orientation) + self.volume_origin = self._est_volume_origin() + else: + raise NotImplementedError + + def _case_multi_slicepacks_multi_slices(self): + if not self.num_slice_packs % len(self._orient): + raise NotImplementedError + start = 0 + num_slices = int(len(self._orient) / self.num_slice_packs) + orientation = [] + positions = [] + for _ in range(self.num_slice_packs): + ori_stack = self._orient[start:start + num_slices] + pos_stack = self._position[start:start + num_slices] + if is_all_element_same(ori_stack): + orientation.append(ori_stack[0]) + positions.append(pos_stack) + start += num_slices + self._orient = orientation + self._position = positions + + def _est_volume_origin(self, id: int|None =None): + """Estimate the origin coordinates of the Volume matrix. 
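+ The origin is taken from the corner frame position along the axis with
+ the largest positional spread.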
+
+ Notes:
+ This code has been tested on a limited dataset and may generate mis-estimations.
+
+ Returns:
+ list: x, y, z coordinates of the volume origin
+ """
+ position = self._position[0] if isinstance(self._position, list) else self._position
+ position = position[id] if id is not None else position
+
+ dx, dy, dz = map(lambda x: x.max() - x.min(), position.T)
+ max_diff_axis = np.argmax([dx, dy, dz])
+
+ if not isinstance(self.gradient_orient, np.ndarray):
+ return self._est_origin_legacy(position, max_diff_axis)
+ zmat = np.zeros(self.gradient_orient[0].shape)
+ for cid, col in enumerate(self.gradient_orient[0].T):
+ yid = np.argmax(abs(col))
+ zmat[cid, yid] = np.round(col[yid], decimals=0)
+ rx, ry, rz = self._calc_eulerangle(np.round(zmat.T))
+ return self._est_origin_pv6to360(position, max_diff_axis, rx, ry, rz)
+
+ @staticmethod
+ def _est_origin_legacy(position, max_diff_axis):
+ """Sub-method to estimate the origin coordinate for PV versions < 6.
+
+ Args:
+ position (numpy.ndarray): The frame position array.
+ max_diff_axis (int): The index of the maximum difference axis.
+
+ Returns:
+ numpy.ndarray: The origin coordinate based on the maximum difference axis.
+ """
+ if max_diff_axis in [0, 1]:
+ idx = position.T[max_diff_axis].argmax()
+ elif max_diff_axis == 2:
+ idx = position.T[max_diff_axis].argmin()
+ else:
+ raise NotImplementedError
+ return position[idx]
+
+ @staticmethod
+ def _est_origin_pv6to360(position, max_diff_axis, rx, ry, rz):
+ """Sub-method to estimate the origin coordinate for PV versions >= 6.
+
+ Args:
+ position (numpy.ndarray): The frame position array.
+ max_diff_axis (int): The index of the maximum difference axis.
+ rx: calculated Euler angle (in degrees) of the x axis of the gradient
+ ry: calculated Euler angle (in degrees) of the y axis of the gradient
+ rz: calculated Euler angle (in degrees) of the z axis of the gradient
+
+ Returns:
+ numpy.ndarray: The origin coordinate based on the maximum difference axis.
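+
+ Note:
+ The gradient orientation matrix is rounded to a signed permutation
+ matrix before the Euler angles are computed, so rx, ry, and rz are
+ expected to be multiples of 90 degrees.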
+ """ + max_axis = position.T[max_diff_axis] + if max_diff_axis == 0: + idx = max_axis.argmin() if rx == 90 else max_axis.argmax() + elif max_diff_axis == 1: + if rx == -90 and ry == -90 or rx != -90: + idx = max_axis.argmax() + else: + idx = max_axis.argmin() + elif max_diff_axis == 2: + if (abs(ry) == 180) or ((abs(rx) == 180) and (abs(rz) == 180)): + idx = max_axis.argmax() + else: + idx = max_axis.argmin() + else: + raise NotImplementedError + return position[idx] + + @staticmethod + def _get_orient_axis(orient_matrix): + return [np.argmax(abs(orient_matrix[:, 0])), + np.argmax(abs(orient_matrix[:, 1])), + np.argmax(abs(orient_matrix[:, 2]))] + + @staticmethod + def _is_rotation_matrix(matrix): + t_matrix = np.transpose(matrix) + should_be_identity = np.dot(t_matrix, matrix) + i = np.identity(3, dtype=matrix.dtype) + n = np.linalg.norm(i - should_be_identity) + return n < 1e-6 + + @staticmethod + def _calc_eulerangle(matrix): + assert (Orientation._is_rotation_matrix(matrix)) + + sy = math.sqrt(matrix[0, 0] * matrix[0, 0] + matrix[1, 0] * matrix[1, 0]) + singular = sy < 1e-6 + if not singular: + x = math.atan2(matrix[2, 1], matrix[2, 2]) + y = math.atan2(-matrix[2, 0], sy) + z = math.atan2(matrix[1, 0], matrix[0, 0]) + else: + x = math.atan2(-matrix[1, 2], matrix[1, 1]) + y = math.atan2(-matrix[2, 0], sy) + z = 0 + return np.array([math.degrees(x), + math.degrees(y), + math.degrees(z)]) + + @classmethod + def _get_gradient_encoding_dir(cls, analobj: 'ScanInfoAnalyzer'): + if analobj.visu_version != 1: + return getattr(analobj, 'visu_acq_grad_encoding') + phase_enc = getattr(analobj, 'visu_acq_image_phase_enc_dir') + phase_enc = phase_enc[0] if is_all_element_same(phase_enc) else phase_enc + return ( + [cls._decode_encdir(p) for p in phase_enc] \ + if isinstance(phase_enc, list) and len(phase_enc) > 1 \ + else cls._decode_encdir(phase_enc) + ) + + @staticmethod + def _decode_encdir(enc_param): + if enc_param == 'col_dir': + return ['read_enc', 'phase_enc'] + elif enc_param == 'row_dir': + return ['phase_enc', 'read_enc'] + elif enc_param == 'col_slice_dir': + return ['read_enc', 'phase_enc', 'slice_enc'] + elif enc_param == 'row_slice_dir': + return ['phase_enc', 'read_enc', 'slice_enc'] + else: + raise NotImplementedError diff --git a/brkraw/api/loader.py b/brkraw/api/loader.py index a36741c..02579cc 100644 --- a/brkraw/api/loader.py +++ b/brkraw/api/loader.py @@ -1,6 +1,108 @@ -from .pvobj import PvDataset +from __future__ import annotations +import sys +import ctypes +from typing import Dict +from .analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer from ..config import ConfigManager +from .pvobj import PvDataset, PvScan -class BrukerLoader: + +class BrukerLoader(PvDataset): def __init__(self, path): - self._pvobj = PvDataset(path, **ConfigManager().get('spec')) + super().__init__(path, **ConfigManager().get('spec')) + self._parse_header() + + def get_scan(self, scan_id, reco_id=None, analyze=True): + """ + Get a scan object by scan ID. 
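+
+ Args:
+ scan_id: ID of the scan to retrieve.
+ reco_id (optional): Reconstruction ID passed through to the analyzer.
+ analyze (bool): If True, the returned ScanObj runs ScanInfoAnalyzer immediately.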
+ """ + pvscan = super().get_scan(scan_id) + return ScanObj(pvscan=pvscan, reco_id=reco_id, + loader_address=id(self), analyze=analyze) + + def _parse_header(self) -> (Dict | None): + if not len(self._contents.keys()): + self.header = None + return + contents = self._contents if 'files' in self._contents else self._contents[list(self._contents.keys())[0]] + if subj := getattr(self, 'subject') if 'subject' in contents['files'] else None: + subj_header = getattr(subj, 'header') if subj else None + if title := subj_header['TITLE'] if subj_header else None: + pvspec = title.split(',')[-1].strip() if 'ParaVision' in title else "ParaVision < 6" + if "360" in title: + entry, position = getattr(subj, "SUBJECT_study_instrument_position").split('_')[:2] + else: + entry = getattr(subj, "SUBJECT_entry").split('_')[-1] + position = getattr(subj, "SUBJECT_position").split('_')[-1] + + self.header = { + 'version': pvspec, + 'user_account': subj_header['OWNER'], + 'subject_entry': entry, + 'subject_position': position, + } + else: + self.header = None + + def info(self, io_handler=None): + io_handler = io_handler or sys.stdout + + +class ScanInfo: + def __init__(self): + pass + +class ScanObj(PvScan): + def __init__(self, pvscan: 'PvScan', reco_id: int|None = None, + loader_address: int|None = None, analyze: bool=True): + super().__init__(pvscan._scan_id, + (pvscan._rootpath, pvscan._path), + pvscan._contents, + pvscan._recos, + binary_files = pvscan._binary_files, + parameter_files = pvscan._parameter_files) + + self.reco_id = reco_id + self._loader_address = loader_address + self._pvscan_address = id(pvscan) + if analyze: + self.set_info() + + def set_info(self): + self.info = self.get_info(self.reco_id) + + def get_info(self, reco_id): + infoobj = ScanInfo() + + pvscan = self.retrieve_pvscan() + analysed = ScanInfoAnalyzer(pvscan, reco_id) + for attr_name in dir(analysed): + if 'info_' in attr_name: + attr_vals = getattr(analysed, attr_name) + setattr(infoobj, attr_name.replace('info_', ''), attr_vals) + return infoobj + + def get_affine(self, subj_type:str|None = None, subj_position:str|None = None, get_analyzer=False): + info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) + analyzer = AffineAnalyzer(info) + return analyzer if get_analyzer else analyzer.get_affine(subj_type, subj_position) + + def get_dataarray(self, reco_id: int|None = None, get_analyzer=False): + reco_id = reco_id or self.avail[0] + recoobj = self.get_reco(reco_id) + datafiles = [f for f in recoobj._contents['files'] if f in recoobj._binary_files] + if not len(datafiles): + raise FileNotFoundError('no binary file') + fileobj = recoobj._open_as_fileobject(datafiles.pop()) + info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) + analyzer = DataArrayAnalyzer(info, fileobj) + return analyzer if get_analyzer else analyzer.get_dataarray() + + def retrieve_pvscan(self): + if self._pvscan_address: + return ctypes.cast(self._pvscan_address, ctypes.py_object).value + + def retrieve_loader(self): + if self._loader_address: + return ctypes.cast(self._loader_address, ctypes.py_object).value + \ No newline at end of file From a0d8f94609b58dd6d5e58081963abad0706d9a01 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 11 Apr 2024 21:06:44 -0400 Subject: [PATCH 11/38] [update] api module --- brkraw/api/__init__.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index b669a00..39e94f0 100755 --- a/brkraw/api/__init__.py +++ 
b/brkraw/api/__init__.py @@ -1,3 +1,4 @@ from .loader import BrukerLoader +from ..config import ConfigManager -__all__ = ['BrukerLoader'] +__all__ = ['BrukerLoader', 'ConfigManager'] \ No newline at end of file From 20aa33d4c7d25ea11ca1cb0637e8a5625fea3d58 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 11 Apr 2024 22:42:38 -0400 Subject: [PATCH 12/38] [update] ScanObj, get_analyzer option added --- brkraw/api/loader.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/brkraw/api/loader.py b/brkraw/api/loader.py index 02579cc..52c3d7a 100644 --- a/brkraw/api/loader.py +++ b/brkraw/api/loader.py @@ -71,19 +71,24 @@ def __init__(self, pvscan: 'PvScan', reco_id: int|None = None, def set_info(self): self.info = self.get_info(self.reco_id) - def get_info(self, reco_id): + def get_info(self, reco_id, get_analyzer:bool=False): infoobj = ScanInfo() pvscan = self.retrieve_pvscan() analysed = ScanInfoAnalyzer(pvscan, reco_id) + if get_analyzer: + return analysed for attr_name in dir(analysed): if 'info_' in attr_name: attr_vals = getattr(analysed, attr_name) setattr(infoobj, attr_name.replace('info_', ''), attr_vals) return infoobj - def get_affine(self, subj_type:str|None = None, subj_position:str|None = None, get_analyzer=False): - info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) + def get_affine(self, reco_id:int|None = None, subj_type:str|None = None, subj_position:str|None = None, get_analyzer=False): + if reco_id: + info = self.get_info(reco_id) + else: + info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) analyzer = AffineAnalyzer(info) return analyzer if get_analyzer else analyzer.get_affine(subj_type, subj_position) From 6634407de78efb2332e9a82536528174fa4c4268 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 11 Apr 2024 22:43:00 -0400 Subject: [PATCH 13/38] [update] Helper class reordered --- brkraw/api/helper.py | 144 +++++++++++++++++++++---------------------- 1 file changed, 72 insertions(+), 72 deletions(-) diff --git a/brkraw/api/helper.py b/brkraw/api/helper.py index b275755..afa5d1a 100644 --- a/brkraw/api/helper.py +++ b/brkraw/api/helper.py @@ -125,7 +125,79 @@ def get_info(self): 'warns': self.warns } + +class FrameGroup(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + if hasattr(analobj, 'visu_fg_order_desc_dim'): + self.exists = True + self.type = getattr(analobj, 'visu_core_frame_type') \ + if hasattr(analobj, 'visu_core_frame_type') else None + self.shape = [] + self.id = [] + self.comment = [] + self.dependent_vals = [] + for (shape, fgid, comment, vals_start, vals_cnt) in getattr(analobj, 'visu_fg_order_desc'): + self.shape.append(shape) + self.id.append(fgid) + self.comment.append(comment) + self.dependent_vals.append([ + getattr(analobj, 'visu_group_dep_vals')[vals_start + count] + for count in range(vals_cnt) + ] if vals_cnt else []) + self.size = reduce(lambda x, y: x * y, self.shape) + else: + self.exists = False + self._warn('frame group information') + + def get_info(self): + if not self.exists: + return None + return { + 'type': self.type, + 'size': self.size, + 'shape': self.shape, + 'id': self.id, + 'comment': self.comment, + 'dependent_vals': self.dependent_vals, + 'warns': self.warns + } + + +class Image(BaseHelper): + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + self.dim = getattr(analobj, 'visu_core_dim') + self.dim_desc = getattr(analobj, 'visu_core_dim_desc') + fov = getattr(analobj, 'visu_core_extent') if 
hasattr(analobj, 'visu_core_extent') else None + shape = getattr(analobj, 'visu_core_size') if hasattr(analobj, 'visu_core_size') else None + self.resolusion = np.divide(fov, shape).tolist() if (fov and shape) else None + self.field_of_view = fov + self.shape = shape + + if self.dim > 3: + self._warn('Image dimension larger than 3') + message = lambda x: f'image contains {x} dimension' + if isinstance(self.dim_desc, list): + for d in self.dim_desc: + if d != 'spatial': + self._warn(message(d)) + elif isinstance(self.dim_desc, str): + if self.dim_desc != 'spatial': + self._warn(message(self.dim_desc)) + def get_info(self): + return { + 'dim': self.dim, + 'dim_desc': self.dim_desc, + 'shape': self.shape, + 'resolution': self.resolusion, + 'field_of_view': self.field_of_view, + 'unit': 'mm', + 'warns': self.warns + } + + class SlicePack(BaseHelper): def __init__(self, analobj: 'ScanInfoAnalyzer'): super().__init__() @@ -235,44 +307,6 @@ def get_info(self): } -class FrameGroup(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - if hasattr(analobj, 'visu_fg_order_desc_dim'): - self.exists = True - self.type = getattr(analobj, 'visu_core_frame_type') \ - if hasattr(analobj, 'visu_core_frame_type') else None - self.shape = [] - self.id = [] - self.comment = [] - self.dependent_vals = [] - for (shape, fgid, comment, vals_start, vals_cnt) in getattr(analobj, 'visu_fg_order_desc'): - self.shape.append(shape) - self.id.append(fgid) - self.comment.append(comment) - self.dependent_vals.append([ - getattr(analobj, 'visu_group_dep_vals')[vals_start + count] - for count in range(vals_cnt) - ] if vals_cnt else []) - self.size = reduce(lambda x, y: x * y, self.shape) - else: - self.exists = False - self._warn('frame group information') - - def get_info(self): - if not self.exists: - return None - return { - 'type': self.type, - 'size': self.size, - 'shape': self.shape, - 'id': self.id, - 'comment': self.comment, - 'dependent_vals': self.dependent_vals, - 'warns': self.warns - } - - class Cycle(BaseHelper): def __init__(self, analobj: 'ScanInfoAnalyzer'): super().__init__() @@ -292,40 +326,6 @@ def get_info(self): "unit": 'msec', 'warns': self.warns } - - -class Image(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - self.dim = getattr(analobj, 'visu_core_dim') - self.dim_desc = getattr(analobj, 'visu_core_dim_desc') - fov = getattr(analobj, 'visu_core_extent') if hasattr(analobj, 'visu_core_extent') else None - shape = getattr(analobj, 'visu_core_size') if hasattr(analobj, 'visu_core_size') else None - self.resolusion = np.divide(fov, shape).tolist() if (fov and shape) else None - self.field_of_view = fov - self.shape = shape - - if self.dim > 3: - self._warn('Image dimension larger than 3') - message = lambda x: f'image contains {x} dimension' - if isinstance(self.dim_desc, list): - for d in self.dim_desc: - if d != 'spatial': - self._warn(message(d)) - elif isinstance(self.dim_desc, str): - if self.dim_desc != 'spatial': - self._warn(message(self.dim_desc)) - - def get_info(self): - return { - 'dim': self.dim, - 'dim_desc': self.dim_desc, - 'shape': self.shape, - 'resolution': self.resolusion, - 'field_of_view': self.field_of_view, - 'unit': 'mm', - 'warns': self.warns - } class Orientation(BaseHelper): From c34cc0ebf19dde894e77510cd57bc3721996a2d2 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Thu, 11 Apr 2024 23:38:04 -0400 Subject: [PATCH 14/38] [patch] analyzer, variable sync between object issue --- brkraw/api/analyzer.py | 21 
+++++++++--------
 1 file changed, 13 insertions(+), 8 deletions(-)

diff --git a/brkraw/api/analyzer.py b/brkraw/api/analyzer.py
index 83372dc..2d39bdd 100644
--- a/brkraw/api/analyzer.py
+++ b/brkraw/api/analyzer.py
@@ -2,6 +2,7 @@ import re
 from brkraw.api import helper
 import numpy as np
+from copy import copy
 from typing import TYPE_CHECKING
 if TYPE_CHECKING:
 from .pvobj import PvScan
@@ -101,11 +102,12 @@ def __dir__(self):
 class AffineAnalyzer:
 def __init__(self, infoobj: 'ScanInfo'):
+ infoobj = copy(infoobj)
 if infoobj.image['dim'] == 2:
 xr, yr = infoobj.image['resolution']
 self.resolution = [(xr, yr, zr) for zr in infoobj.slicepack['slice_distances_each_pack']]
 elif infoobj.image['dim'] == 3:
- self.resolution = infoobj.image['resolution']
+ self.resolution = infoobj.image['resolution'][:]
 else:
 raise NotImplementedError
 if infoobj.slicepack['num_slice_packs'] > 1:
@@ -199,14 +201,17 @@ def _correct_orientation(cls, affine, subj_pose, subj_type):
 
 @staticmethod
 def _inspect_subj_info(subj_pose, subj_type):
- part, side = subj_pose.split('_')
- assert part in SUBJPOSE['part'], 'Invalid subject position'
- assert side in SUBJPOSE['side'], 'Invalid subject position'
- assert subj_type in SUBJTYPE, 'Invalid subject type'
+ if subj_pose:
+ part, side = subj_pose.split('_')
+ assert part in SUBJPOSE['part'], 'Invalid subject position'
+ assert side in SUBJPOSE['side'], 'Invalid subject position'
+ if subj_type:
+ assert subj_type in SUBJTYPE, 'Invalid subject type'
 
 
 class DataArrayAnalyzer:
 def __init__(self, infoobj: 'ScanInfo', fileobj: BufferedReader|ZipExtFile):
+ infoobj = copy(infoobj)
 self._parse_info(infoobj)
 self.buffer = fileobj
 
@@ -216,13 +221,13 @@ def _parse_info(self, infoobj: 'ScanInfo'):
 self.slope = infoobj.dataarray['2dseq_slope']
 self.offset = infoobj.dataarray['2dseq_offset']
 self.dtype = infoobj.dataarray['2dseq_dtype']
- self.shape = infoobj.image['shape']
- self.shape_desc = infoobj.image['dim_desc']
+ self.shape = infoobj.image['shape'][:]
+ self.shape_desc = infoobj.image['dim_desc'][:]
 if infoobj.frame_group and infoobj.frame_group['type']:
 self._calc_array_shape(infoobj)
 
 def _calc_array_shape(self, infoobj: 'ScanInfo'):
- self.shape.extend(infoobj.frame_group['shape'])
+ self.shape.extend(infoobj.frame_group['shape'][:])
 self.shape_desc.extend([fgid.replace('FG_', '').lower() for fgid in infoobj.frame_group['id']])
 
 def get_dataarray(self):

From 0df12bc80a7a88b9d8a8d55603374b581979191c Mon Sep 17 00:00:00 2001
From: dvm-shlee
Date: Fri, 12 Apr 2024 17:14:01 -0400
Subject: [PATCH 15/38] [patch] resolve issue with open fileobject

---
 brkraw/api/pvobj/dataset.py | 40 +++++++++++++++++++++------------
 1 file changed, 27 insertions(+), 13 deletions(-)

diff --git a/brkraw/api/pvobj/dataset.py b/brkraw/api/pvobj/dataset.py
index ebf5357..9d3e790 100755
--- a/brkraw/api/pvobj/dataset.py
+++ b/brkraw/api/pvobj/dataset.py
@@ -105,19 +105,20 @@ def _open_as_fileobject(self, key):
 
 Raises:
 ValueError: If the key does not exist in the files.
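 
 Example (illustrative):
 fileobj = pvobj._open_as_fileobject('fid')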
""" - contents = self._contents if 'files' in self._contents else self._contents[list(self._contents.keys())[0]] - rootpath = self._rootpath if 'files' in self._contents else self._path - files = contents.get('files') + rootpath = self._rootpath or self._path + if not self.contents: + raise ValueError(f'file not exists in "{rel_path}".') + files = self.contents.get('files') path_list = [*([str(self._scan_id)] if self._scan_id else []), *(['pdata', str(self._reco_id)] if self._reco_id else []), key] if key not in files: - if file_indexes := contents.get('file_indexes'): + if file_indexes := self.contents.get('file_indexes'): rel_path = self._path else: rel_path = os.path.join(*path_list) raise ValueError(f'file not exists in "{rel_path}".\n [{", ".join(files)}]') - if file_indexes := contents.get('file_indexes'): + if file_indexes := self.contents.get('file_indexes'): with zipfile.ZipFile(rootpath) as zf: idx = file_indexes[files.index(key)] return zf.open(zf.namelist()[idx]) @@ -135,7 +136,9 @@ def _open_as_string(self, key): Returns: list: The lines of the file as strings. """ - return self._open_as_fileobject(key).read().decode('UTF-8').split('\n') + with self._open_as_fileobject(key) as f: + string = f.read().decode('UTF-8').split('\n') + return string def __getitem__(self, key): """Returns the value associated with the given key. @@ -172,10 +175,10 @@ def __getattr__(self, key): return Parameter(self._open_as_string(key), name=key, scan_id=self._scan_id, reco_id=self._reco_id) elif any(binary_key == key or binary_key.replace('.', '_') == key for binary_key in self._binary_files): return self._open_as_fileobject(key) - elif any(file_key == key or file_key.replace('.', '_') == key for file_key in self._contents['files']): + elif any(file_key == key or file_key.replace('.', '_') == key for file_key in self.contents['files']): return self._open_as_fileobject(key) else: - raise AttributeError + raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{key}'") def config(self, **kwargs): """ @@ -194,6 +197,9 @@ def config(self, **kwargs): if 'parameter_files' in kwargs: self._parameter_files = kwargs['parameter_files'] + @property + def contents(self): + return self._contents def __dir__(self): return ['set_config'] @@ -210,7 +216,6 @@ class PvDataset(BaseMethods): Methods: get_scan(scan_id): Get a specific scan object by ID. - get_reco(scan_id, reco_id): Get a specific reco object by scan ID and reco ID. Properties: path (str): The path of the object. @@ -338,7 +343,7 @@ def _process_childobj(self, matched, item, **kwargs): @property def contents(self): - for _, contents in self._contents.items(): + for _, contents in super().contents.items(): if 'subject' in contents['files']: return contents @@ -439,6 +444,8 @@ def update(self, contents): Returns: None """ + if contents: + self.is_compressed = True if contents.get('file_indexes') else False self._contents = contents def set_reco(self, path, reco_id, contents, **kwargs): @@ -492,7 +499,10 @@ def path(self): Returns: str: The path. """ - return self._path + path = (self._rootpath, self._path) + if self.is_compressed: + return path + return os.path.join(*path) @property def avail(self): @@ -545,7 +555,8 @@ def __init__(self, scan_id, reco_id, pathes, contents, **kwargs): self._reco_id = reco_id self._rootpath, self._path = pathes self._contents = contents - + self.is_compressed = True if contents.get('file_indexes') else False + @property def path(self): """ @@ -554,7 +565,10 @@ def path(self): Returns: str: The path. 
""" - return self._path + path = (self._rootpath, self._path) + if self.is_compressed: + return path + return os.path.join(*path) def __dir__(self): return super().__dir__() + ['path'] \ No newline at end of file From 97470195567ccba46e3eb14a32e3e7744ecabfd2 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Fri, 12 Apr 2024 17:14:32 -0400 Subject: [PATCH 16/38] [update] minor update --- brkraw/api/helper.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/brkraw/api/helper.py b/brkraw/api/helper.py index afa5d1a..0c9f9d3 100644 --- a/brkraw/api/helper.py +++ b/brkraw/api/helper.py @@ -1,7 +1,7 @@ from __future__ import annotations from typing import TYPE_CHECKING if TYPE_CHECKING: - from .analyzer import ScanInfoAnalyzer, ScanInfo + from .analyzer import ScanInfoAnalyzer import re import math import warnings @@ -66,6 +66,7 @@ def rotate_affine(affine, rad_x=0, rad_y=0, rad_z=0): rotated_vec = rmat['z'].dot(rmat['y'].dot(rmat['x'].dot(af_vec))) return from_matvec(rotated_mat, rotated_vec) + class BaseHelper: def __init__(self): self.warns = [] From c2d3c228913d6fe4d228890fe51ca8cd3ada9f52 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Fri, 12 Apr 2024 17:15:48 -0400 Subject: [PATCH 17/38] scaffold for tonii module --- brkraw/app/tonifti.py | 84 +++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 brkraw/app/tonifti.py diff --git a/brkraw/app/tonifti.py b/brkraw/app/tonifti.py new file mode 100644 index 0000000..6af82e2 --- /dev/null +++ b/brkraw/app/tonifti.py @@ -0,0 +1,84 @@ +import numpy as np +from enum import Enum +from brkraw.api.loader import BrukerLoader + +class ScaleMode(Enum): + NONE = 0 + APPLY = 1 + HEADER = 2 + +class BrukerToNifti(BrukerLoader): + def __init__(self, path): + super().__init__(path) + self._cache = {} + + def info(self): + pass + + def get_scan(self, scan_id:int): + if scan_id not in self._cache.keys(): + self._cache[scan_id] = super().get_scan(scan_id) + return self._cache[scan_id] + + def get_pars(self, scan_id:int, reco_id:int|None=None): + scanobj = self.get_scan(scan_id) + return scanobj.get_info(reco_id, get_analyzer=True).get_pars(reco_id) + + def get_affine(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None): + return self.get_affine_dict(scan_id, reco_id, subj_type, subj_position)['affine'] + + def get_dataobj(self, scan_id:int, reco_id:int|None=None, scale_mode:ScaleMode = ScaleMode.APPLY): + if scale_mode == ScaleMode.HEADER: + raise ValueError('HEADER not supported, use get_nifti1image instead') + data_dict = self.get_data_dict(scan_id, reco_id) + dataobj = data_dict['data_array'] + if scale_mode == ScaleMode.APPLY: + dataslp = data_dict['data_slope'] + dataoff = data_dict['data_offset'] + return dataobj + + def get_data_dict(self, scan_id:int, reco_id:int|None=None): + scanobj = self.get_scan(scan_id) + data_info = scanobj.get_dataarray(reco_id, get_analyzer=True) + axis_labels = data_info.shape_desc + slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2 + dataarray = scanobj.get_dataarray(reco_id) + if slice_axis != 2: + dataarray = np.swapaxes(dataarray, slice_axis, 2) + axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis] + return { + 'data_array': dataarray, + 'data_slope': data_info.slope, + 'data_offset': data_info.offset, + 'axis_labels': axis_labels + } + + def get_affine_dict(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None): + scanobj = 
self.get_scan(scan_id) + affine_info = scanobj.get_affine(reco_id, get_analyzer=True) + subj_type = subj_type or affine_info.subj_type + subj_position = subj_position or affine_info.subj_position + affine = affine_info.get_affine(subj_type, subj_position) + return { + "num_slicepacks": len(affine) if isinstance(affine, list) else 1, + "affine": affine, + "subj_type": subj_type, + "subj_position": subj_position + } + + def get_bids_metadata(self, scan_id:int, reco_id:int|None=None, bids_recipe=None): + pars = self.get_pars(scan_id, reco_id) + + def get_bdata(self, scan_id): + method = self.get_pars(scan_id).method + + def get_nifti1header(self, scan_id:int, reco_id:int|None=None): + pars = self.get_pars(scan_id, reco_id) + + def get_nifti1image(self, scan_id:int, reco_id:int|None=None, + subj_type:str|None=None, subj_position:str|None=None, + scale_mode:ScaleMode = ScaleMode.HEADER): + data_dict = self.get_dataobj(scan_id, reco_id) + affine_dict = self.get_affine(scan_id, reco_id, subj_type, subj_position) + + \ No newline at end of file From 764c84328b8fb42577084e725d2433d5f21e61e8 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Fri, 12 Apr 2024 22:11:31 -0400 Subject: [PATCH 18/38] remove config dependency and filetype automatically loaded --- brkraw/api/pvobj/dataset.py | 114 ++++++++++++------------------------ 1 file changed, 38 insertions(+), 76 deletions(-) diff --git a/brkraw/api/pvobj/dataset.py b/brkraw/api/pvobj/dataset.py index 9d3e790..7b1275b 100755 --- a/brkraw/api/pvobj/dataset.py +++ b/brkraw/api/pvobj/dataset.py @@ -3,11 +3,8 @@ import zipfile from collections import OrderedDict from collections import defaultdict -try: - from .parser import Parameter -except ImportError: - # case for debugging - from brkraw.api.pvobj.parser import Parameter +from .parser import Parameter + class BaseMethods: """ @@ -28,21 +25,6 @@ class BaseMethods: _path = None _rootpath = None _contents = None - _parameter_files = None - _binary_files = None - - def __init__(self, **kwargs): - """ - Initialize the object. - - Args: - **kwargs: Keyword arguments for configuration options. - - Returns: - None - """ - if kwargs: - self.config(**kwargs) @staticmethod def _fetch_dir(path): @@ -152,10 +134,7 @@ def __getitem__(self, key): Raises: KeyError: If the key is not found. """ - if key in self._parameter_files: - return Parameter(self._open_as_string(key), name=key, scan_id=self._scan_id, reco_id=self._reco_id) - else: - return self._open_as_fileobject(key) + return self.__getattr__(key) def __getattr__(self, key): """ @@ -171,38 +150,26 @@ def __getattr__(self, key): obj = Dataset() param = obj.some_key # Returns a Parameter object or file object. """ - if any(param_key == key or param_key.replace('.', '_') == key for param_key in self._parameter_files): - return Parameter(self._open_as_string(key), name=key, scan_id=self._scan_id, reco_id=self._reco_id) - elif any(binary_key == key or binary_key.replace('.', '_') == key for binary_key in self._binary_files): - return self._open_as_fileobject(key) - elif any(file_key == key or file_key.replace('.', '_') == key for file_key in self.contents['files']): - return self._open_as_fileobject(key) - else: - raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{key}'") - - def config(self, **kwargs): - """ - Set the configuration options for the object. - - Args: - **kwargs: Keyword arguments for the configuration options. - binary_files (list): A list of binary file names. - parameter_files (list): A list of parameter file names. 
-
- Returns:
- None
- """
- if 'binary_files' in kwargs:
- self._binary_files = kwargs['binary_files']
- if 'parameter_files' in kwargs:
- self._parameter_files = kwargs['parameter_files']
+ key = key[1:] if key.startswith('_') else key # allow underscore-prefixed aliases (e.g. '_2dseq') to reach files such as '2dseq'
+
+ if file := [f for f in self.contents['files'] if (f == key or f.replace('.', '_') == key)]:
+ fileobj = self._open_as_fileobject(file.pop())
+ if self._is_binary(fileobj):
+ return fileobj
+ string = fileobj.read().decode('UTF-8').split('\n')
+ par = Parameter(string,
+ name=key, scan_id=self._scan_id, reco_id=self._reco_id)
+ return par if par.header else string
+ raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{key}'")
 
 @property
 def contents(self):
 return self._contents
 
- def __dir__(self):
- return ['set_config']
+ @staticmethod
+ def _is_binary(fileobj, num_bytes=512):
+ block = fileobj.read(num_bytes)
+ fileobj.seek(0)
+ return b'\x00' in block
 
 
 class PvDataset(BaseMethods):
@@ -222,7 +189,7 @@ class PvDataset(BaseMethods):
 avail (list): A list of available scans.
 contents (dict): A dictionary of pvdataset contents.
 """
- def __init__(self, path, debug=False, **kwargs):
+ def __init__(self, path, debug=False):
 """
 Initialize the object with the given path and optional debug flag.
@@ -241,11 +208,9 @@ def __init__(self, path, debug=False):
 obj = ClassName(path='/path/to/dataset', debug=True)
 """
- if 'pvdataset' in kwargs:
- super().__init__(**kwargs['pvdataset'])
 if not debug:
 self._check_dataset_validity(path)
- self._construct(**kwargs)
+ self._construct()
 
 # internal method
 def _check_dataset_validity(self, path):
@@ -277,7 +242,7 @@ def _check_dataset_validity(self, path):
 else:
 raise ValueError(f"The path '{self._path}' does not meet the required criteria.")
 
- def _construct(self, **kwargs): # sourcery skip: low-code-quality
+ def _construct(self): # sourcery skip: low-code-quality
 """
 Constructs the object by organizing the contents.
@@ -302,10 +267,10 @@ def _construct(self): # sourcery skip: low-code-quality
 elif not contents['files']:
 to_remove.append(path)
 elif matched := re.match(r'(?:.*/)?(\d+)/(\D+)/(\d+)$', path) or re.match(r'(?:.*/)?(\d+)$', path):
- to_remove.append(self._process_childobj(matched, (path, contents), **kwargs))
+ to_remove.append(self._process_childobj(matched, (path, contents)))
 self._clear_contents(to_remove)
 
- def _process_childobj(self, matched, item, **kwargs):
+ def _process_childobj(self, matched, item):
 """
 The `_process_childobj` method processes a child object based on the provided arguments and updates the internal state of the object.
@@ -329,14 +294,12 @@ def _process_childobj(self, matched, item):
 path, contents = item
 scan_id = int(matched.group(1))
 if scan_id not in self._scans:
- pvscan_kwargs = kwargs.get('pvscan') or {}
- self._scans[scan_id] = PvScan(scan_id, (self.path, path), **pvscan_kwargs)
+ self._scans[scan_id] = PvScan(scan_id, (self.path, path))
 if len(matched.groups()) == 1 and 'pdata' in contents['dirs']:
 self._scans[scan_id].update(contents)
 elif len(matched.groups()) == 3 and matched.group(2) == 'pdata':
 reco_id = int(matched.group(3))
- pvreco_kwargs = kwargs.get('pvreco') or {}
- self._scans[scan_id].set_reco(path, reco_id, contents, **pvreco_kwargs)
+ self._scans[scan_id].set_reco(path, reco_id, contents)
 else:
 self._backup[path] = contents
 return path
@@ -400,6 +363,9 @@ class PvScan(BaseMethods):
 
 Inherits from BaseMethods.
 
+ Attributes:
+ is_compressed (bool): Indicates if the dataset is compressed.
+ Methods: update(contents): Update the contents of the dataset. set_reco(path, reco_id, contents): Set a reco object with the specified path, ID, and contents. @@ -408,8 +374,9 @@ class PvScan(BaseMethods): Properties: path (str): The path. avail (list): A list of available items. + contents (dict): A dictionary of pvscan contents. """ - def __init__(self, scan_id, pathes, contents=None, recos=None, **kwargs): + def __init__(self, scan_id, pathes, contents=None, recos=None): """ Initialize a Dataset object. @@ -428,7 +395,6 @@ def __init__(self, scan_id, pathes, contents=None, recos=None, **kwargs): Methods: update(contents): Update the contents of the dataset. """ - super().__init__(**kwargs) self._scan_id = scan_id self._rootpath, self._path = pathes self.update(contents) @@ -448,7 +414,7 @@ def update(self, contents): self.is_compressed = True if contents.get('file_indexes') else False self._contents = contents - def set_reco(self, path, reco_id, contents, **kwargs): + def set_reco(self, path, reco_id, contents): """ Set a reco object with the specified path, ID, and contents. @@ -460,7 +426,7 @@ def set_reco(self, path, reco_id, contents, **kwargs): Returns: None """ - self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents, **kwargs) + self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents) def get_reco(self, reco_id): """ @@ -480,11 +446,11 @@ def get_reco(self, reco_id): def get_visu_pars(self, reco_id=None): if reco_id: return getattr(self.get_reco(reco_id), 'visu_pars') - elif 'visu_pars' in self._contents['files']: + elif 'visu_pars' in self.contents['files']: return getattr(self, 'visu_pars') elif len(self.avail): recoobj = self.get_reco(self.avail[0]) - if 'visu_pars' not in recoobj._contents['files']: + if 'visu_pars' not in recoobj.contents['files']: raise FileNotFoundError else: return getattr(recoobj, 'visu_pars') @@ -514,15 +480,15 @@ def avail(self): """ return sorted(list(self._recos)) - def __dir__(self): - return super().__dir__() + ['path', 'avail', 'get_reco', 'get_visu_pars'] - class PvReco(BaseMethods): """ A class representing a PvReco object. Inherits from BaseMethods. + + Attributes: + is_compressed (bool): Indicates if the dataset is compressed. Args: scan_id (int): The ID of the scan. @@ -533,7 +499,7 @@ class PvReco(BaseMethods): Properties: path (str): The path. """ - def __init__(self, scan_id, reco_id, pathes, contents, **kwargs): + def __init__(self, scan_id, reco_id, pathes, contents): """ Initialize a Dataset object. @@ -550,7 +516,6 @@ def __init__(self, scan_id, reco_id, pathes, contents, **kwargs): _path (str): The path. _contents (list): The list of contents. 
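 _reco_id (int): The ID of the reco.
 _rootpath (str): The root path.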
""" - super().__init__(**kwargs) self._scan_id = scan_id self._reco_id = reco_id self._rootpath, self._path = pathes @@ -569,6 +534,3 @@ def path(self): if self.is_compressed: return path return os.path.join(*path) - - def __dir__(self): - return super().__dir__() + ['path'] \ No newline at end of file From 970d6545182fc198606bdbab0d8e72f0929cd60c Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 18:20:57 -0400 Subject: [PATCH 19/38] [update] helper module to folder --- brkraw/api/helper.py | 532 ------------------------------- brkraw/api/helper/__init__.py | 10 + brkraw/api/helper/base.py | 20 ++ brkraw/api/helper/cycle.py | 37 +++ brkraw/api/helper/dataarray.py | 59 ++++ brkraw/api/helper/frame_group.py | 55 ++++ brkraw/api/helper/image.py | 48 +++ brkraw/api/helper/orientation.py | 263 +++++++++++++++ brkraw/api/helper/protocol.py | 39 +++ brkraw/api/helper/slicepack.py | 124 +++++++ 10 files changed, 655 insertions(+), 532 deletions(-) delete mode 100644 brkraw/api/helper.py create mode 100644 brkraw/api/helper/__init__.py create mode 100644 brkraw/api/helper/base.py create mode 100644 brkraw/api/helper/cycle.py create mode 100644 brkraw/api/helper/dataarray.py create mode 100644 brkraw/api/helper/frame_group.py create mode 100644 brkraw/api/helper/image.py create mode 100644 brkraw/api/helper/orientation.py create mode 100644 brkraw/api/helper/protocol.py create mode 100644 brkraw/api/helper/slicepack.py diff --git a/brkraw/api/helper.py b/brkraw/api/helper.py deleted file mode 100644 index 0c9f9d3..0000000 --- a/brkraw/api/helper.py +++ /dev/null @@ -1,532 +0,0 @@ -from __future__ import annotations -from typing import TYPE_CHECKING -if TYPE_CHECKING: - from .analyzer import ScanInfoAnalyzer -import re -import math -import warnings -import contextlib -import numpy as np -from functools import partial, reduce - -WORDTYPE = \ - dict(_32BIT_SGN_INT = 'i', - _16BIT_SGN_INT = 'h', - _8BIT_UNSGN_INT = 'B', - _32BIT_FLOAT = 'f') -BYTEORDER = \ - dict(littleEndian = '<', - bigEndian = '>') - -def is_all_element_same(listobj): - if listobj is None: - return True - else: - return all(map(partial(lambda x, y: x == y, y=listobj[0]), listobj)) - -def from_matvec(mat, vec): - """Create an affine transformation matrix from a matrix and a vector.""" - if mat.shape == (3, 3) and vec.shape == (3,): - affine = np.eye(4) - affine[:3, :3] = mat - affine[:3, 3] = vec - return affine - else: - raise ValueError("Matrix must be 3x3 and vector must be 1x3") - -def to_matvec(affine): - """ - Decompose a 4x4 affine matrix into a 3x3 matrix and a 1x3 vector. - - Parameters: - affine (numpy.ndarray): A 4x4 affine transformation matrix. - - Returns: - tuple: A 3x3 matrix and a 1x3 vector. 
- """ - if affine.shape != (4, 4): - raise ValueError("Affine matrix must be 4x4") - mat = affine[:3, :3] - vec = affine[:3, 3] - return mat, vec - -def rotate_affine(affine, rad_x=0, rad_y=0, rad_z=0): - ''' axis = x or y or z ''' - rmat = dict(x = np.array([[1, 0, 0], - [0, np.cos(rad_x), -np.sin(rad_x)], - [0, np.sin(rad_x), np.cos(rad_x)]]).astype('float'), - y = np.array([[np.cos(rad_y), 0, np.sin(rad_y)], - [0, 1, 0], - [-np.sin(rad_y), 0, np.cos(rad_y)]]).astype('float'), - z = np.array([[np.cos(rad_z), -np.sin(rad_z), 0], - [np.sin(rad_z), np.cos(rad_z), 0], - [0, 0, 1]]).astype('float')) - af_mat, af_vec = to_matvec(affine) - rotated_mat = rmat['z'].dot(rmat['y'].dot(rmat['x'].dot(af_mat))) - rotated_vec = rmat['z'].dot(rmat['y'].dot(rmat['x'].dot(af_vec))) - return from_matvec(rotated_mat, rotated_vec) - - -class BaseHelper: - def __init__(self): - self.warns = [] - - def _warn(self, message): - warnings.warn(message, UserWarning) - self.warns.append(message) - - -class Protocol(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - acqp = analobj.pars.acqp - method = analobj.pars.method - visu = analobj.pars.visu - - self.pv_version = str(visu['VisuCreatorVersion']) if visu else None - self.pulse_program = acqp['PULPROG'] - self.scan_name = acqp['ACQ_scan_name'] - self.scan_method = method['Method'] - if visu is None: - self._warn("visu_pars not found") - - def get_info(self): - return { - 'pv_version': self.pv_version, - 'pulse_program': self.pulse_program, - 'scan_name': self.scan_name, - 'scan_method': self.scan_method, - 'warns': [] - } - - -class DataArray(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - fid_word_type = f'_{"".join(analobj.pars.acqp["ACQ_word_size"].split("_"))}_SGN_INT' - fid_byte_order = f'{analobj.pars.acqp["BYTORDA"]}Endian' - self.fid_dtype = np.dtype(f'{BYTEORDER[fid_byte_order]}{WORDTYPE[fid_word_type]}') - - byte_order = getattr(analobj, 'visu_core_byte_order') - word_type = getattr(analobj, 'visu_core_word_type') - self.data_dtype = np.dtype(f'{BYTEORDER[byte_order]}{WORDTYPE[word_type]}') - data_slope = getattr(analobj, 'visu_core_data_slope') - data_offset = getattr(analobj, 'visu_core_data_offs') - self.data_slope = data_slope[0] \ - if isinstance(data_slope, list) and is_all_element_same(data_slope) else data_slope - self.data_offset = data_offset[0] \ - if isinstance(data_offset, list) and is_all_element_same(data_offset) else data_offset - - def get_info(self): - return { - 'fid_dtype': self.fid_dtype, - '2dseq_dtype': self.data_dtype, - '2dseq_slope': self.data_slope, - '2dseq_offset': self.data_offset, - 'warns': self.warns - } - - -class FrameGroup(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - if hasattr(analobj, 'visu_fg_order_desc_dim'): - self.exists = True - self.type = getattr(analobj, 'visu_core_frame_type') \ - if hasattr(analobj, 'visu_core_frame_type') else None - self.shape = [] - self.id = [] - self.comment = [] - self.dependent_vals = [] - for (shape, fgid, comment, vals_start, vals_cnt) in getattr(analobj, 'visu_fg_order_desc'): - self.shape.append(shape) - self.id.append(fgid) - self.comment.append(comment) - self.dependent_vals.append([ - getattr(analobj, 'visu_group_dep_vals')[vals_start + count] - for count in range(vals_cnt) - ] if vals_cnt else []) - self.size = reduce(lambda x, y: x * y, self.shape) - else: - self.exists = False - self._warn('frame group information') - - def get_info(self): - if not 
self.exists: - return None - return { - 'type': self.type, - 'size': self.size, - 'shape': self.shape, - 'id': self.id, - 'comment': self.comment, - 'dependent_vals': self.dependent_vals, - 'warns': self.warns - } - - -class Image(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - self.dim = getattr(analobj, 'visu_core_dim') - self.dim_desc = getattr(analobj, 'visu_core_dim_desc') - fov = getattr(analobj, 'visu_core_extent') if hasattr(analobj, 'visu_core_extent') else None - shape = getattr(analobj, 'visu_core_size') if hasattr(analobj, 'visu_core_size') else None - self.resolusion = np.divide(fov, shape).tolist() if (fov and shape) else None - self.field_of_view = fov - self.shape = shape - - if self.dim > 3: - self._warn('Image dimension larger than 3') - message = lambda x: f'image contains {x} dimension' - if isinstance(self.dim_desc, list): - for d in self.dim_desc: - if d != 'spatial': - self._warn(message(d)) - elif isinstance(self.dim_desc, str): - if self.dim_desc != 'spatial': - self._warn(message(self.dim_desc)) - - def get_info(self): - return { - 'dim': self.dim, - 'dim_desc': self.dim_desc, - 'shape': self.shape, - 'resolution': self.resolusion, - 'field_of_view': self.field_of_view, - 'unit': 'mm', - 'warns': self.warns - } - - -class SlicePack(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - fg_info = analobj.info_frame_group if hasattr(analobj, 'info_frame_group') else FrameGroup(analobj).get_info() - img_info = analobj.info_image if hasattr(analobj, 'info_image') else Image(analobj).get_info() - if fg_info is None or fg_info['type'] is None: - num_slice_packs = 1 - num_slices_each_pack = [getattr(analobj, 'visu_core_frame_count')] - slice_distances_each_pack = [getattr(analobj, 'visu_core_frame_thickness')] \ - if img_info['dim'] > 1 else [] - else: - if analobj.visu_version == 1: - parser = self._parse_legacy - else: - parser = self._parse_6to360 - - num_slice_packs, num_slices_each_pack, slice_distances_each_pack = parser(analobj, fg_info) - if len(slice_distances_each_pack): - for i, d in enumerate(slice_distances_each_pack): - if d == 0: - slice_distances_each_pack[i] = getattr(analobj, 'visu_core_frame_thickness') - if not len(num_slices_each_pack): - num_slices_each_pack = [1] - - self.num_slice_packs = num_slice_packs - self.num_slices_each_pack = num_slices_each_pack - self.slice_distances_each_pack = slice_distances_each_pack - - disk_slice_order = getattr(analobj, 'visu_core_disk_slice_order') if hasattr(analobj, 'visu_core_disk_slice_order') else 'normal' - self.is_reverse = 'reverse' in disk_slice_order - if analobj.visu_version not in (1, 3, 4, 5): - self._warn(f'Parameters with current Visu Version has not been tested: v{analobj.visu_version}') - - def _parse_legacy(self, analobj, fg_info): - """ - Parses slice description for legacy cases, PV version < 6. - This function calculates the number of slice packs, the number of slices in each pack, - and the slice distances for legacy cases. 
- """ - num_slice_packs = 1 - with contextlib.suppress(AttributeError): - phase_enc_dir = getattr(analobj, 'visu_acq_image_phase_enc_dir') - phase_enc_dir = [phase_enc_dir[0]] if is_all_element_same(phase_enc_dir) else phase_enc_dir - num_slice_packs = len(phase_enc_dir) - - shape = fg_info['shape'] - num_slices_each_pack = [] - with contextlib.suppress(ValueError): - slice_fid = fg_info['id'].index('FG_SLICE') - if num_slice_packs > 1: - num_slices_each_pack = [int(shape[slice_fid]/num_slice_packs) for _ in range(num_slice_packs)] - else: - num_slices_each_pack = [shape[slice_fid]] - - slice_fg = [fg for fg in fg_info['id'] if 'slice' in fg.lower()] - if len(slice_fg): - if num_slice_packs > 1: - num_slices_each_pack.extend( - int(shape[0] / num_slice_packs) - for _ in range(num_slice_packs) - ) - else: - num_slices_each_pack.append(shape[0]) - slice_distances_each_pack = [getattr(analobj, 'visu_core_frame_thickness') for _ in range(num_slice_packs)] - return num_slice_packs, num_slices_each_pack, slice_distances_each_pack - - def _parse_6to360(self, analobj, fg_info): - """ - Parses slice description for cases with PV version 6 to 360 slices. - This function calculates the number of slice packs, the number of slices in each pack, - and the slice distances for cases with 6 to 360 slices. - """ - if hasattr(analobj, 'visu_core_slice_packs_def'): - num_slice_packs = getattr(analobj, 'visu_core_slice_packs_def')[0][1] - else: - num_slice_packs = 1 - slices_desc_in_pack = getattr(analobj, 'visu_core_slice_packs_slices') \ - if hasattr(analobj, 'visu_core_slice_packs_slices') else [] - slice_distance = getattr(analobj, 'visu_core_slice_packs_slice_dist') \ - if hasattr(analobj, 'visu_core_slice_packs_slice_dist') else [] - - slice_fg = [fg for fg in fg_info['id'] if 'slice' in fg.lower()] - if len(slice_fg): - if len(slices_desc_in_pack): - num_slices_each_pack = [slices_desc_in_pack[0][1] for _ in range(num_slice_packs)] - else: - num_slices_each_pack = [1] - if isinstance(slice_distance, list): - slice_distances_each_pack = [slice_distance[0] for _ in range(num_slice_packs)] - elif isinstance(slice_distance, (int, float)): - slice_distances_each_pack = [slice_distance for _ in range(num_slice_packs)] - else: - self._warn("Not supported data type for Slice Distance") - else: - num_slices_each_pack = [1] - slice_distances_each_pack = [getattr(analobj, 'visu_core_frame_thickness')] - return num_slice_packs, num_slices_each_pack, slice_distances_each_pack - - def get_info(self): - return { - 'num_slice_packs': self.num_slice_packs, - 'num_slices_each_pack': self.num_slices_each_pack, - 'slice_distances_each_pack': self.slice_distances_each_pack, - 'slice_distance_unit': 'mm', - 'reverse_slice_order': self.is_reverse, - 'warns': self.warns - } - - -class Cycle(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - scan_time = getattr(analobj, 'visu_acq_scan_time') or 0 - fg_info = analobj.info_frame_group if hasattr(analobj, 'info_frame_group') else FrameGroup(analobj).get_info() - fg_not_slice = [] - if fg_info != None and fg_info['type'] != None: - fg_not_slice.extend([fg_info['shape'][id] for id, fg in enumerate(fg_info['id']) - if not re.search('slice', fg, re.IGNORECASE)]) - self.num_frames = np.prod(fg_not_slice) if len(fg_not_slice) else 1 - self.time_step = (scan_time / self.num_frames) - - def get_info(self): - return { - "num_frames": self.num_frames, - "time_step": self.time_step, - "unit": 'msec', - 'warns': self.warns - } - - -class 
Orientation(BaseHelper): - def __init__(self, analobj: 'ScanInfoAnalyzer'): - super().__init__() - info_slicepack = analobj.info_slicepack if hasattr(analobj, 'info_slicepack') else SlicePack(analobj).get_info() - self.subject_type = getattr(analobj, 'visu_subject_type') \ - if hasattr(analobj, 'visu_subject_type') else None - self.subject_position = getattr(analobj, 'visu_subject_position') \ - if hasattr(analobj, 'visu_subject_position') else None - self._orient = getattr(analobj, 'visu_core_orientation').tolist() - self._position = getattr(analobj, 'visu_core_position') - self.gradient_orient = getattr(analobj, 'pvm_s_pack_arr_grad_orient') - self.num_slice_packs = info_slicepack['num_slice_packs'] - self.gradient_encoding_dir = self._get_gradient_encoding_dir(analobj) - self.orientation = [] - self.orientation_desc = [] - self.volume_origin = [] - - if self.num_slice_packs > 1: - self._case_multi_slicepacks() - else: - self._case_single_slicepack() - - def get_info(self): - return { - 'subject_type': self.subject_type, - 'subject_position': self.subject_position, - 'volume_origin': self.volume_origin, - 'orientation': self.orientation, - 'orientation_desc': self.orientation_desc, - 'gradient_orient': self.gradient_orient - } - - def _case_multi_slicepacks(self): - if len(self._orient) != self.num_slice_packs: - self._case_multi_slicepacks_multi_slices() - self.is_msp_ms = True - else: - self.is_msp_ms = False - - for id, ori in enumerate(self._orient): - rs_ori = np.array(ori).reshape((3,3)) - self.orientation.append(rs_ori) - self.orientation_desc.append(self._get_orient_axis(rs_ori)) - if self.is_msp_ms: - self.volume_origin.append(self._est_volume_origin(id)) - else: - self.volume_origin.append(self._position[id]) - - def _case_single_slicepack(self): - if is_all_element_same(self._orient): - self.orientation = np.array(self._orient[0]).reshape((3,3)) - self.orientation_desc = self._get_orient_axis(self.orientation) - self.volume_origin = self._est_volume_origin() - else: - raise NotImplementedError - - def _case_multi_slicepacks_multi_slices(self): - if not self.num_slice_packs % len(self._orient): - raise NotImplementedError - start = 0 - num_slices = int(len(self._orient) / self.num_slice_packs) - orientation = [] - positions = [] - for _ in range(self.num_slice_packs): - ori_stack = self._orient[start:start + num_slices] - pos_stack = self._position[start:start + num_slices] - if is_all_element_same(ori_stack): - orientation.append(ori_stack[0]) - positions.append(pos_stack) - start += num_slices - self._orient = orientation - self._position = positions - - def _est_volume_origin(self, id: int|None =None): - """Estimate the origin coordinates of the Volume matrix. - - Notes: - This code has been tested on a limited dataset and may generate mis-estimations. 
- - Returns: - list: x, y, z coordinates of the volume origin - """ - position = self._position[0] if isinstance(self._position, list) else self._position - position = position[id] if id != None else position - - dx, dy, dz = map(lambda x: x.max() - x.min(), position.T) - max_diff_axis = np.argmax([dx, dy, dz]) - - if not isinstance(self.gradient_orient, np.ndarray): - return self._est_origin_legacy(position, max_diff_axis) - zmat = np.zeros(self.gradient_orient[0].shape) - for cid, col in enumerate(self.gradient_orient[0].T): - yid = np.argmax(abs(col)) - zmat[cid, yid] = np.round(col[yid], decimals=0) - rx, ry, rz = self._calc_eulerangle(np.round(zmat.T)) - return self._est_origin_pv6to360(position, max_diff_axis, rx, ry, rz) - - @staticmethod - def _est_origin_legacy(position, max_diff_axis): - """sub-method to estimate origin coordinate from PV version < 6 - - Args: - max_diff_axis (int): The index of the maximum difference axis. - - Returns: - numpy.ndarray: The origin coordinate based on the maximum difference axis. - """ - if max_diff_axis in [0, 1]: - idx = position.T[max_diff_axis].argmax() - elif max_diff_axis == 2: - idx = position.T[max_diff_axis].argmin() - else: - raise NotImplementedError - return position[idx] - - @staticmethod - def _est_origin_pv6to360(position, max_diff_axis, rx, ry, rz): - """sub-method to estimate origin coordinate from PV version >= 6 - - Args: - max_diff_axis (int): The index of the maximum difference axis. - rx: calculated eulerangle of x axis of gradient - ry: calculated eulerangle of y axis of gradient - rz: calculated eulerangle of z axis of gradient - - Returns: - numpy.ndarray: The origin coordinate based on the maximum difference axis. - """ - max_axis = position.T[max_diff_axis] - if max_diff_axis == 0: - idx = max_axis.argmin() if rx == 90 else max_axis.argmax() - elif max_diff_axis == 1: - if rx == -90 and ry == -90 or rx != -90: - idx = max_axis.argmax() - else: - idx = max_axis.argmin() - elif max_diff_axis == 2: - if (abs(ry) == 180) or ((abs(rx) == 180) and (abs(rz) == 180)): - idx = max_axis.argmax() - else: - idx = max_axis.argmin() - else: - raise NotImplementedError - return position[idx] - - @staticmethod - def _get_orient_axis(orient_matrix): - return [np.argmax(abs(orient_matrix[:, 0])), - np.argmax(abs(orient_matrix[:, 1])), - np.argmax(abs(orient_matrix[:, 2]))] - - @staticmethod - def _is_rotation_matrix(matrix): - t_matrix = np.transpose(matrix) - should_be_identity = np.dot(t_matrix, matrix) - i = np.identity(3, dtype=matrix.dtype) - n = np.linalg.norm(i - should_be_identity) - return n < 1e-6 - - @staticmethod - def _calc_eulerangle(matrix): - assert (Orientation._is_rotation_matrix(matrix)) - - sy = math.sqrt(matrix[0, 0] * matrix[0, 0] + matrix[1, 0] * matrix[1, 0]) - singular = sy < 1e-6 - if not singular: - x = math.atan2(matrix[2, 1], matrix[2, 2]) - y = math.atan2(-matrix[2, 0], sy) - z = math.atan2(matrix[1, 0], matrix[0, 0]) - else: - x = math.atan2(-matrix[1, 2], matrix[1, 1]) - y = math.atan2(-matrix[2, 0], sy) - z = 0 - return np.array([math.degrees(x), - math.degrees(y), - math.degrees(z)]) - - @classmethod - def _get_gradient_encoding_dir(cls, analobj: 'ScanInfoAnalyzer'): - if analobj.visu_version != 1: - return getattr(analobj, 'visu_acq_grad_encoding') - phase_enc = getattr(analobj, 'visu_acq_image_phase_enc_dir') - phase_enc = phase_enc[0] if is_all_element_same(phase_enc) else phase_enc - return ( - [cls._decode_encdir(p) for p in phase_enc] \ - if isinstance(phase_enc, list) and len(phase_enc) > 1 \ - 
else cls._decode_encdir(phase_enc)
-        )
-
-    @staticmethod
-    def _decode_encdir(enc_param):
-        if enc_param == 'col_dir':
-            return ['read_enc', 'phase_enc']
-        elif enc_param == 'row_dir':
-            return ['phase_enc', 'read_enc']
-        elif enc_param == 'col_slice_dir':
-            return ['read_enc', 'phase_enc', 'slice_enc']
-        elif enc_param == 'row_slice_dir':
-            return ['phase_enc', 'read_enc', 'slice_enc']
-        else:
-            raise NotImplementedError
diff --git a/brkraw/api/helper/__init__.py b/brkraw/api/helper/__init__.py
new file mode 100644
index 0000000..e872f1e
--- /dev/null
+++ b/brkraw/api/helper/__init__.py
@@ -0,0 +1,10 @@
+from .protocol import Protocol
+from .frame_group import FrameGroup
+from .dataarray import DataArray
+from .image import Image
+from .slicepack import SlicePack
+from .cycle import Cycle
+from .orientation import Orientation, to_matvec, from_matvec, rotate_affine
+
+__all__ = [Protocol, FrameGroup, DataArray, Image, SlicePack, Cycle, Orientation,
+           to_matvec, from_matvec, rotate_affine]
\ No newline at end of file
diff --git a/brkraw/api/helper/base.py b/brkraw/api/helper/base.py
new file mode 100644
index 0000000..2b6fc55
--- /dev/null
+++ b/brkraw/api/helper/base.py
@@ -0,0 +1,20 @@
+import warnings
+from functools import partial
+
+def is_all_element_same(listobj):
+    if listobj is None:
+        return True
+    else:
+        return all(map(partial(lambda x, y: x == y, y=listobj[0]), listobj))
+
+class BaseHelper:
+    def __init__(self):
+        self.warns = []
+
+    def _warn(self, message):
+        warnings.warn(message, UserWarning)
+        self.warns.append(message)
+
+    def get(self, attr):
+        return getattr(self, attr) if hasattr(self, attr) else None
+
diff --git a/brkraw/api/helper/cycle.py b/brkraw/api/helper/cycle.py
new file mode 100644
index 0000000..e1afd71
--- /dev/null
+++ b/brkraw/api/helper/cycle.py
@@ -0,0 +1,37 @@
+from __future__ import annotations
+import re
+import numpy as np
+from typing import TYPE_CHECKING
+from .base import BaseHelper
+from .frame_group import FrameGroup
+if TYPE_CHECKING:
+    from ..analyzer import ScanInfoAnalyzer
+
+
+class Cycle(BaseHelper):
+    """
+    Dependencies:
+        FrameGroup
+        visu_pars
+
+    Args:
+        analobj (ScanInfoAnalyzer): analyzer object to parse the cycle information from.
+    """
+    def __init__(self, analobj: 'ScanInfoAnalyzer'):
+        super().__init__()
+        scan_time = analobj.visu_pars.get("VisuAcqScanTime") or 0
+        fg_info = analobj.get('info_frame_group') or FrameGroup(analobj).get_info()
+        fg_not_slice = []
+        if fg_info != None and fg_info['type'] != None:
+            fg_not_slice.extend([fg_info['shape'][id] for id, fg in enumerate(fg_info['id'])
+                                     if not re.search('slice', fg, re.IGNORECASE)])
+        self.num_frames = np.prod(fg_not_slice) if len(fg_not_slice) else 1
+        self.time_step = (scan_time / self.num_frames)
+
+    def get_info(self):
+        return {
+            "num_frames": self.num_frames,
+            "time_step": self.time_step,
+            "unit": 'msec',
+            'warns': self.warns
+        }
\ No newline at end of file
diff --git a/brkraw/api/helper/dataarray.py b/brkraw/api/helper/dataarray.py
new file mode 100644
index 0000000..1ab0310
--- /dev/null
+++ b/brkraw/api/helper/dataarray.py
@@ -0,0 +1,59 @@
+from __future__ import annotations
+import numpy as np
+from typing import TYPE_CHECKING
+from .base import BaseHelper, is_all_element_same
+if TYPE_CHECKING:
+    from ..analyzer import ScanInfoAnalyzer
+
+
+WORDTYPE = \
+    dict(_32BIT_SGN_INT = 'i',
+         _16BIT_SGN_INT = 'h',
+         _8BIT_UNSGN_INT = 'B',
+         _32BIT_FLOAT = 'f')
+
+BYTEORDER = \
+    dict(littleEndian = '<',
+         bigEndian = '>')
+
+
+class DataArray(BaseHelper):
+    """Requires 'visu_pars' and 'acqp' to parse parameters
related to the dtype of binary files + + Dependencies: + acqp + visu_pars + + Args: + BaseHelper (_type_): _description_ + """ + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + acqp = analobj.acqp + visu_pars = analobj.visu_pars + + fid_word_type = f'_{"".join(acqp["ACQ_word_size"].split("_"))}_SGN_INT' + fid_byte_order = f'{acqp["BYTORDA"]}Endian' + self.fid_dtype = np.dtype(f'{BYTEORDER[fid_byte_order]}{WORDTYPE[fid_word_type]}') + + byte_order = visu_pars["VisuCoreByteOrder"] + word_type = visu_pars["VisuCoreWordType"] + self.data_dtype = np.dtype(f'{BYTEORDER[byte_order]}{WORDTYPE[word_type]}') + data_slope = visu_pars["VisuCoreDataSlope"] + data_offset = visu_pars["VisuCoreDataOffs"] + self.data_slope = data_slope[0] \ + if isinstance(data_slope, list) and is_all_element_same(data_slope) else data_slope + self.data_offset = data_offset[0] \ + if isinstance(data_offset, list) and is_all_element_same(data_offset) else data_offset + if isinstance(self.data_slope, list) or isinstance(self.data_offset, list): + self._warn("Data slope and data offset values are unusual. " + "They are expected to be either a list containing the same elements or a single float value.") + + def get_info(self): + return { + 'fid_dtype': self.fid_dtype, + '2dseq_dtype': self.data_dtype, + '2dseq_slope': self.data_slope, + '2dseq_offset': self.data_offset, + 'warns': self.warns + } \ No newline at end of file diff --git a/brkraw/api/helper/frame_group.py b/brkraw/api/helper/frame_group.py new file mode 100644 index 0000000..daa0442 --- /dev/null +++ b/brkraw/api/helper/frame_group.py @@ -0,0 +1,55 @@ +from __future__ import annotations +from typing import TYPE_CHECKING +from .base import BaseHelper +from functools import reduce +if TYPE_CHECKING: + from ..analyzer import ScanInfoAnalyzer + + +class FrameGroup(BaseHelper): + """ + Dependencies: + visu_pars + + Args: + BaseHelper (_type_): _description_ + """ + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + visu_pars = analobj.visu_pars + if visu_pars.get('VisuFGOrderDescDim'): + self.exists = True + self.type = visu_pars.get("VisuCoreFrameType") + self.shape = [] + self.id = [] + self.comment = [] + self.dependent_vals = [] + for (shape, fgid, comment, vals_start, vals_cnt) in visu_pars["VisuFGOrderDesc"]: + self.shape.append(shape) + self.id.append(fgid) + self.comment.append(comment) + self.dependent_vals.append([ + visu_pars["VisuGroupDepVals"][vals_start + count] + for count in range(vals_cnt) + ] if vals_cnt else []) + self.size = reduce(lambda x, y: x * y, self.shape) + else: + self.exists = False + self._warn("Unable to construct frame group information because 'VisuFGOrderDescDim' " + "was not found in the 'visu_pars' parameter file.") + + def get_info(self): + if not self.exists: + return { + 'type': None, + 'warns': self.warns + } + return { + 'type': self.type, + 'size': self.size, + 'shape': self.shape, + 'id': self.id, + 'comment': self.comment, + 'dependent_vals': self.dependent_vals, + 'warns': self.warns + } \ No newline at end of file diff --git a/brkraw/api/helper/image.py b/brkraw/api/helper/image.py new file mode 100644 index 0000000..e5c911d --- /dev/null +++ b/brkraw/api/helper/image.py @@ -0,0 +1,48 @@ +from __future__ import annotations +import numpy as np +from typing import TYPE_CHECKING +from .base import BaseHelper +if TYPE_CHECKING: + from ..analyzer import ScanInfoAnalyzer + +class Image(BaseHelper): + """ + Dependencies: + visu_pars + + Args: + BaseHelper (_type_): _description_ + """ 
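+    # Sketch of the intent: the resolution computed in __init__ below is
+    # VisuCoreExtent divided by VisuCoreSize (field of view / matrix size,
+    # in mm per voxel); this assumes both parameters are present and share
+    # the same axis ordering.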
+    def __init__(self, analobj: 'ScanInfoAnalyzer'):
+        super().__init__()
+        visu_pars = analobj.visu_pars
+
+        self.dim = visu_pars["VisuCoreDim"]
+        self.dim_desc = visu_pars["VisuCoreDimDesc"]
+        fov = visu_pars.get("VisuCoreExtent")
+        shape = visu_pars.get("VisuCoreSize")
+        self.resolution = np.divide(fov, shape).tolist() if (fov and shape) else None
+        self.field_of_view = fov
+        self.shape = shape
+
+        if self.dim > 3:
+            self._warn('Image dimension exceeds 3. Ensure that handling of higher dimensions is supported and correctly implemented.')
+        message = lambda x: f"The axis of the image includes '{x}' dimension, which is not limited to spatial types."
+        if isinstance(self.dim_desc, list):
+            for d in self.dim_desc:
+                if d != 'spatial':
+                    self._warn(message(d))
+        elif isinstance(self.dim_desc, str):
+            if self.dim_desc != 'spatial':
+                self._warn(message(self.dim_desc))
+
+    def get_info(self):
+        return {
+            'dim': self.dim,
+            'dim_desc': self.dim_desc,
+            'shape': self.shape,
+            'resolution': self.resolution,
+            'field_of_view': self.field_of_view,
+            'unit': 'mm',
+            'warns': self.warns
+        }
\ No newline at end of file
diff --git a/brkraw/api/helper/orientation.py b/brkraw/api/helper/orientation.py
new file mode 100644
index 0000000..454a03f
--- /dev/null
+++ b/brkraw/api/helper/orientation.py
@@ -0,0 +1,263 @@
+from __future__ import annotations
+import math
+import numpy as np
+from typing import TYPE_CHECKING
+from .base import BaseHelper, is_all_element_same
+from .slicepack import SlicePack
+if TYPE_CHECKING:
+    from ..analyzer import ScanInfoAnalyzer
+
+
+def from_matvec(mat, vec):
+    """Create an affine transformation matrix from a matrix and a vector."""
+    if mat.shape == (3, 3) and vec.shape == (3,):
+        affine = np.eye(4)
+        affine[:3, :3] = mat
+        affine[:3, 3] = vec
+        return affine
+    else:
+        raise ValueError("Matrix must be 3x3 and vector must be 1x3")
+
+def to_matvec(affine):
+    """
+    Decompose a 4x4 affine matrix into a 3x3 matrix and a 1x3 vector.
+
+    Parameters:
+        affine (numpy.ndarray): A 4x4 affine transformation matrix.
+
+    Returns:
+        tuple: A 3x3 matrix and a 1x3 vector.
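+
+    Example (illustrative, using an identity affine):
+        >>> mat, vec = to_matvec(np.eye(4))
+        >>> (mat.shape, vec.shape)
+        ((3, 3), (3,))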
+ """ + if affine.shape != (4, 4): + raise ValueError("Affine matrix must be 4x4") + mat = affine[:3, :3] + vec = affine[:3, 3] + return mat, vec + +def rotate_affine(affine, rad_x=0, rad_y=0, rad_z=0): + ''' axis = x or y or z ''' + rmat = dict(x = np.array([[1, 0, 0], + [0, np.cos(rad_x), -np.sin(rad_x)], + [0, np.sin(rad_x), np.cos(rad_x)]]).astype('float'), + y = np.array([[np.cos(rad_y), 0, np.sin(rad_y)], + [0, 1, 0], + [-np.sin(rad_y), 0, np.cos(rad_y)]]).astype('float'), + z = np.array([[np.cos(rad_z), -np.sin(rad_z), 0], + [np.sin(rad_z), np.cos(rad_z), 0], + [0, 0, 1]]).astype('float')) + af_mat, af_vec = to_matvec(affine) + rotated_mat = rmat['z'].dot(rmat['y'].dot(rmat['x'].dot(af_mat))) + rotated_vec = rmat['z'].dot(rmat['y'].dot(rmat['x'].dot(af_vec))) + return from_matvec(rotated_mat, rotated_vec) + + +class Orientation(BaseHelper): + """ + Dependencies: + SlicePack + method + visu_pars + + Args: + BaseHelper (_type_): _description_ + """ + def __init__(self, analobj: 'ScanInfoAnalyzer'): + super().__init__() + visu_pars = analobj.visu_pars + info_slicepack = analobj.get("info_slicepack") or SlicePack(analobj).get_info() + self.subject_type = visu_pars.get("VisuSubjectType") + self.subject_position = visu_pars.get("VisuSubjectPosition") + self._orient = visu_pars["VisuCoreOrientation"].tolist() + self._position = visu_pars["VisuCorePosition"] + self.gradient_orient = analobj.method["PVM_SPackArrGradOrient"] + self.num_slice_packs = info_slicepack['num_slice_packs'] + self.gradient_encoding_dir = self._get_gradient_encoding_dir(visu_pars) + self.orientation = [] + self.orientation_desc = [] + self.volume_origin = [] + + if self.num_slice_packs > 1: + self._case_multi_slicepacks() + else: + self._case_single_slicepack() + + def get_info(self): + return { + 'subject_type': self.subject_type, + 'subject_position': self.subject_position, + 'volume_origin': self.volume_origin, + 'orientation': self.orientation, + 'orientation_desc': self.orientation_desc, + 'gradient_orient': self.gradient_orient, + 'warns': self.warns + } + + def _case_multi_slicepacks(self): + if len(self._orient) != self.num_slice_packs: + self._case_multi_slicepacks_multi_slices() + self.is_msp_ms = True + else: + self.is_msp_ms = False + + for id, ori in enumerate(self._orient): + rs_ori = np.array(ori).reshape((3,3)) + self.orientation.append(rs_ori) + self.orientation_desc.append(self._get_orient_axis(rs_ori)) + if self.is_msp_ms: + self.volume_origin.append(self._est_volume_origin(id)) + else: + self.volume_origin.append(self._position[id]) + + def _case_single_slicepack(self): + if is_all_element_same(self._orient): + self.orientation = np.array(self._orient[0]).reshape((3,3)) + self.orientation_desc = self._get_orient_axis(self.orientation) + self.volume_origin = self._est_volume_origin() + else: + raise NotImplementedError + + def _case_multi_slicepacks_multi_slices(self): + if not self.num_slice_packs % len(self._orient): + raise NotImplementedError + start = 0 + num_slices = int(len(self._orient) / self.num_slice_packs) + orientation = [] + positions = [] + for _ in range(self.num_slice_packs): + ori_stack = self._orient[start:start + num_slices] + pos_stack = self._position[start:start + num_slices] + if is_all_element_same(ori_stack): + orientation.append(ori_stack[0]) + positions.append(pos_stack) + start += num_slices + self._orient = orientation + self._position = positions + + def _est_volume_origin(self, id: int|None =None): + """Estimate the origin coordinates of the Volume matrix. 
+ + Notes: + This code has been tested on a limited dataset and may generate mis-estimations. + + Returns: + list: x, y, z coordinates of the volume origin + """ + position = self._position[0] if isinstance(self._position, list) else self._position + position = position[id] if id != None else position + + dx, dy, dz = map(lambda x: x.max() - x.min(), position.T) + max_diff_axis = np.argmax([dx, dy, dz]) + + if not isinstance(self.gradient_orient, np.ndarray): + return self._est_origin_legacy(position, max_diff_axis) + zmat = np.zeros(self.gradient_orient[0].shape) + for cid, col in enumerate(self.gradient_orient[0].T): + yid = np.argmax(abs(col)) + zmat[cid, yid] = np.round(col[yid], decimals=0) + rx, ry, rz = self._calc_eulerangle(np.round(zmat.T)) + return self._est_origin_pv6to360(position, max_diff_axis, rx, ry, rz) + + @staticmethod + def _est_origin_legacy(position, max_diff_axis): + """sub-method to estimate origin coordinate from PV version < 6 + + Args: + max_diff_axis (int): The index of the maximum difference axis. + + Returns: + numpy.ndarray: The origin coordinate based on the maximum difference axis. + """ + if max_diff_axis in [0, 1]: + idx = position.T[max_diff_axis].argmax() + elif max_diff_axis == 2: + idx = position.T[max_diff_axis].argmin() + else: + raise NotImplementedError + return position[idx] + + @staticmethod + def _est_origin_pv6to360(position, max_diff_axis, rx, ry, rz): + """sub-method to estimate origin coordinate from PV version >= 6 + + Args: + max_diff_axis (int): The index of the maximum difference axis. + rx: calculated eulerangle of x axis of gradient + ry: calculated eulerangle of y axis of gradient + rz: calculated eulerangle of z axis of gradient + + Returns: + numpy.ndarray: The origin coordinate based on the maximum difference axis. 
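+
+        Note:
+            As an illustrative sketch: for an axial pack with no gradient
+            rotation (rx == ry == rz == 0) whose positions vary mostly
+            along z (max_diff_axis == 2), the branch below picks the frame
+            with the minimum z coordinate as the volume origin.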
+        """
+        max_axis = position.T[max_diff_axis]
+        if max_diff_axis == 0:
+            idx = max_axis.argmin() if rx == 90 else max_axis.argmax()
+        elif max_diff_axis == 1:
+            if rx == -90 and ry == -90 or rx != -90:
+                idx = max_axis.argmax()
+            else:
+                idx = max_axis.argmin()
+        elif max_diff_axis == 2:
+            if (abs(ry) == 180) or ((abs(rx) == 180) and (abs(rz) == 180)):
+                idx = max_axis.argmax()
+            else:
+                idx = max_axis.argmin()
+        else:
+            raise NotImplementedError
+        return position[idx]
+
+    @staticmethod
+    def _get_orient_axis(orient_matrix):
+        return [np.argmax(abs(orient_matrix[:, 0])),
+                np.argmax(abs(orient_matrix[:, 1])),
+                np.argmax(abs(orient_matrix[:, 2]))]
+
+    @staticmethod
+    def _is_rotation_matrix(matrix):
+        t_matrix = np.transpose(matrix)
+        should_be_identity = np.dot(t_matrix, matrix)
+        i = np.identity(3, dtype=matrix.dtype)
+        n = np.linalg.norm(i - should_be_identity)
+        return n < 1e-6
+
+    @staticmethod
+    def _calc_eulerangle(matrix):
+        assert (Orientation._is_rotation_matrix(matrix))
+
+        sy = math.sqrt(matrix[0, 0] * matrix[0, 0] + matrix[1, 0] * matrix[1, 0])
+        singular = sy < 1e-6
+        if not singular:
+            x = math.atan2(matrix[2, 1], matrix[2, 2])
+            y = math.atan2(-matrix[2, 0], sy)
+            z = math.atan2(matrix[1, 0], matrix[0, 0])
+        else:
+            x = math.atan2(-matrix[1, 2], matrix[1, 1])
+            y = math.atan2(-matrix[2, 0], sy)
+            z = 0
+        return np.array([math.degrees(x),
+                         math.degrees(y),
+                         math.degrees(z)])
+
+    @classmethod
+    def _get_gradient_encoding_dir(cls, visu_pars):
+        if visu_pars["VisuVersion"] != 1:
+            return visu_pars["VisuAcqGradEncoding"]
+        phase_enc = visu_pars["VisuAcqImagePhaseEncDir"]
+        phase_enc = phase_enc[0] if is_all_element_same(phase_enc) else phase_enc
+        return (
+            [cls._decode_encdir(p) for p in phase_enc] \
+                if isinstance(phase_enc, list) and len(phase_enc) > 1 \
+                    else cls._decode_encdir(phase_enc)
+        )
+
+    @staticmethod
+    def _decode_encdir(enc_param):
+        if enc_param == 'col_dir':
+            return ['read_enc', 'phase_enc']
+        elif enc_param == 'row_dir':
+            return ['phase_enc', 'read_enc']
+        elif enc_param == 'col_slice_dir':
+            return ['read_enc', 'phase_enc', 'slice_enc']
+        elif enc_param == 'row_slice_dir':
+            return ['phase_enc', 'read_enc', 'slice_enc']
+        else:
+            raise NotImplementedError
diff --git a/brkraw/api/helper/protocol.py b/brkraw/api/helper/protocol.py
new file mode 100644
index 0000000..fb1e29d
--- /dev/null
+++ b/brkraw/api/helper/protocol.py
@@ -0,0 +1,39 @@
+from __future__ import annotations
+from typing import TYPE_CHECKING
+from .base import BaseHelper
+if TYPE_CHECKING:
+    from ..analyzer import ScanInfoAnalyzer
+
+class Protocol(BaseHelper):
+    """
+    Helper class to parse protocol parameters for data acquisition from the 'acqp' file.
+
+    Args:
+        analobj (ScanInfoAnalyzer): analyzer object providing the 'acqp' parameters.
+    """
+    def __init__(self, analobj: 'ScanInfoAnalyzer'):
+        super().__init__()
+        acqp = analobj.acqp
+        self.sw_version = str(acqp.get('ACQ_sw_version'))
+        self.operator = acqp.get('ACQ_operator')
+        self.pulse_program = acqp.get('PULPROG')
+        self.nucleus = acqp.get('NUCLEUS')
+        self.protocol_name = acqp.get('ACQ_protocol_name') or acqp.get('ACQ_scan_name')
+        self.scan_method = acqp.get('ACQ_method')
+        self.subject_pos = acqp.get('ACQ_patient_pos')
+        self.institution = acqp.get('ACQ_institution')
+        self.device = acqp.get('ACQ_station')
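+
+    # Illustrative sketch (hypothetical values; the exact entries depend on
+    # the dataset): with ACQ_operator == 'nmrsu' and ACQ_station == 'Biospec',
+    # get_info() below would report {'operator': 'nmrsu', 'device': 'Biospec', ...}.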
+
+    def get_info(self):
+        return {
+            'sw_version': self.sw_version,
+            'operator': self.operator,
+            'institution': self.institution,
+            'device': self.device,
+            'nucleus': self.nucleus,
+            'subject_pos': self.subject_pos,
+            'pulse_program': self.pulse_program,
+            'protocol_name': self.protocol_name,
+            'scan_method': self.scan_method,
+            'warns': self.warns
+        }
\ No newline at end of file
diff --git a/brkraw/api/helper/slicepack.py b/brkraw/api/helper/slicepack.py
new file mode 100644
index 0000000..1d03e56
--- /dev/null
+++ b/brkraw/api/helper/slicepack.py
@@ -0,0 +1,124 @@
+from __future__ import annotations
+import contextlib
+from typing import TYPE_CHECKING
+from .base import BaseHelper, is_all_element_same
+from .frame_group import FrameGroup
+from .image import Image
+if TYPE_CHECKING:
+    from ..analyzer import ScanInfoAnalyzer
+
+class SlicePack(BaseHelper):
+    """
+    Dependencies:
+        FrameGroup
+        Image
+        visu_pars
+
+    Args:
+        analobj (ScanInfoAnalyzer): analyzer object to parse the slice-pack layout from.
+    """
+    def __init__(self, analobj: 'ScanInfoAnalyzer'):
+        super().__init__()
+        visu_pars = analobj.visu_pars
+
+        fg_info = analobj.get("info_frame_group") or FrameGroup(analobj).get_info()
+        img_info = analobj.get("info_image") or Image(analobj).get_info()
+        if fg_info is None or fg_info['type'] is None:
+            num_slice_packs = 1
+            num_slices_each_pack = [visu_pars.get("VisuCoreFrameCount")]
+            slice_distances_each_pack = [visu_pars.get("VisuCoreFrameThickness")] \
+                if img_info['dim'] > 1 else []
+        else:
+            if visu_pars["VisuVersion"] == 1:
+                parser = self._parse_legacy
+            else:
+                parser = self._parse_6to360
+
+            num_slice_packs, num_slices_each_pack, slice_distances_each_pack = parser(visu_pars, fg_info)
+            if len(slice_distances_each_pack):
+                for i, d in enumerate(slice_distances_each_pack):
+                    if d == 0:
+                        slice_distances_each_pack[i] = visu_pars["VisuCoreFrameThickness"]
+            if not len(num_slices_each_pack):
+                num_slices_each_pack = [1]
+
+        self.num_slice_packs = num_slice_packs
+        self.num_slices_each_pack = num_slices_each_pack
+        self.slice_distances_each_pack = slice_distances_each_pack
+
+        disk_slice_order = visu_pars.get("VisuCoreDiskSliceOrder") or 'normal'
+        self.is_reverse = 'reverse' in disk_slice_order
+        if visu_pars["VisuVersion"] not in (1, 3, 4, 5):
+            self._warn(f'Parameters of the current Visu version have not been tested: v{visu_pars["VisuVersion"]}')
+
+    def _parse_legacy(self, visu_pars, fg_info):
+        """
+        Parses slice description for legacy cases, PV version < 6.
+        This function calculates the number of slice packs, the number of slices in each pack,
+        and the slice distances for legacy cases.
+        """
+        num_slice_packs = 1
+        with contextlib.suppress(AttributeError):
+            phase_enc_dir = visu_pars["VisuAcqImagePhaseEncDir"]
+            phase_enc_dir = [phase_enc_dir[0]] if is_all_element_same(phase_enc_dir) else phase_enc_dir
+            num_slice_packs = len(phase_enc_dir)
+
+        shape = fg_info['shape']
+        num_slices_each_pack = []
+        with contextlib.suppress(ValueError):
+            slice_fid = fg_info['id'].index('FG_SLICE')
+            if num_slice_packs > 1:
+                num_slices_each_pack = [int(shape[slice_fid]/num_slice_packs) for _ in range(num_slice_packs)]
+            else:
+                num_slices_each_pack = [shape[slice_fid]]
+
+        slice_fg = [fg for fg in fg_info['id'] if 'slice' in fg.lower()]
+        if len(slice_fg):
+            if num_slice_packs > 1:
+                num_slices_each_pack.extend(
+                    int(shape[0] / num_slice_packs)
+                    for _ in range(num_slice_packs)
+                )
+            else:
+                num_slices_each_pack.append(shape[0])
+        slice_distances_each_pack = [visu_pars["VisuCoreFrameThickness"] for _ in range(num_slice_packs)]
+        return num_slice_packs, num_slices_each_pack, slice_distances_each_pack
+
+    # Hypothetical example: with two phase-encoding directions
+    # (num_slice_packs == 2) and an 'FG_SLICE' frame group of shape 10,
+    # the FG_SLICE branch in _parse_legacy above computes int(10 / 2) == 5
+    # slices per pack.
+
+    def _parse_6to360(self, visu_pars, fg_info):
+        """
+        Parses slice description for ParaVision versions 6 through 360.
+ This function calculates the number of slice packs, the number of slices in each pack, + and the slice distances for cases with 6 to 360 slices. + """ + slice_packs_def = visu_pars.get("VisuCoreSlicePacksDef") + num_slice_packs = slice_packs_def[0][1] if slice_packs_def else 1 + slices_desc_in_pack = visu_pars.get("VisuCoreSlicePacksSlices") + slice_distance = visu_pars.get("VisuCoreSlicePacksSliceDist") + slice_fg = [fg for fg in fg_info['id'] if 'slice' in fg.lower()] + + slice_distances_each_pack = [] + if len(slice_fg): + if slices_desc_in_pack: + num_slices_each_pack = [slices_desc_in_pack[0][1] for _ in range(num_slice_packs)] + else: + num_slices_each_pack = [1] + if isinstance(slice_distance, list): + slice_distances_each_pack.extend([slice_distance[0] for _ in range(num_slice_packs)]) + elif isinstance(slice_distance, (int, float)): + slice_distances_each_pack.extend([slice_distance for _ in range(num_slice_packs)]) + else: + self._warn("Not supported data type for Slice Distance") + else: + num_slices_each_pack = [1] + slice_distances_each_pack = [visu_pars["VisuCoreFrameThickness"]] + return num_slice_packs, num_slices_each_pack, slice_distances_each_pack + + def get_info(self): + return { + 'num_slice_packs': self.num_slice_packs, + 'num_slices_each_pack': self.num_slices_each_pack, + 'slice_distances_each_pack': self.slice_distances_each_pack, + 'slice_distance_unit': 'mm', + 'reverse_slice_order': self.is_reverse, + 'warns': self.warns + } \ No newline at end of file From 4b61643f90716cf3b978de172f4fe6ec1596ec88 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 18:21:49 -0400 Subject: [PATCH 20/38] [update] tonifti module to folder --- brkraw/app/tonifti/__init__.py | 3 ++ brkraw/app/{tonifti.py => tonifti/main.py} | 48 +++++++++++++++------- 2 files changed, 36 insertions(+), 15 deletions(-) create mode 100644 brkraw/app/tonifti/__init__.py rename brkraw/app/{tonifti.py => tonifti/main.py} (63%) diff --git a/brkraw/app/tonifti/__init__.py b/brkraw/app/tonifti/__init__.py new file mode 100644 index 0000000..61ec8db --- /dev/null +++ b/brkraw/app/tonifti/__init__.py @@ -0,0 +1,3 @@ +from .main import BrkrawToNifti1 + +__all__ = [BrkrawToNifti1] \ No newline at end of file diff --git a/brkraw/app/tonifti.py b/brkraw/app/tonifti/main.py similarity index 63% rename from brkraw/app/tonifti.py rename to brkraw/app/tonifti/main.py index 6af82e2..c6c6f26 100644 --- a/brkraw/app/tonifti.py +++ b/brkraw/app/tonifti/main.py @@ -1,13 +1,20 @@ +import warnings import numpy as np from enum import Enum -from brkraw.api.loader import BrukerLoader +from brkraw.api.loader import BrkrawLoader + + +XYZT_UNITS = \ + dict(EPI=('mm', 'sec')) + class ScaleMode(Enum): NONE = 0 APPLY = 1 HEADER = 2 -class BrukerToNifti(BrukerLoader): + +class BrkrawToNifti1(BrkrawLoader): def __init__(self, path): super().__init__(path) self._cache = {} @@ -20,29 +27,27 @@ def get_scan(self, scan_id:int): self._cache[scan_id] = super().get_scan(scan_id) return self._cache[scan_id] - def get_pars(self, scan_id:int, reco_id:int|None=None): + def get_scan_info(self, scan_id:int, reco_id:int|None=None): scanobj = self.get_scan(scan_id) - return scanobj.get_info(reco_id, get_analyzer=True).get_pars(reco_id) + return scanobj.get_info(reco_id, get_analyzer=True) def get_affine(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None): return self.get_affine_dict(scan_id, reco_id, subj_type, subj_position)['affine'] def get_dataobj(self, scan_id:int, reco_id:int|None=None, 
scale_mode:ScaleMode = ScaleMode.APPLY): if scale_mode == ScaleMode.HEADER: - raise ValueError('HEADER not supported, use get_nifti1image instead') + raise ValueError("The 'HEADER' option for scale_mode is not supported in this context. Only 'NONE' or 'APPLY' options are available. " + "To use the 'HEADER' option, please switch to the 'get_nifti1image' method, which supports storing scales in the header.") data_dict = self.get_data_dict(scan_id, reco_id) dataobj = data_dict['data_array'] - if scale_mode == ScaleMode.APPLY: - dataslp = data_dict['data_slope'] - dataoff = data_dict['data_offset'] return dataobj def get_data_dict(self, scan_id:int, reco_id:int|None=None): scanobj = self.get_scan(scan_id) - data_info = scanobj.get_dataarray(reco_id, get_analyzer=True) + data_info = scanobj.get_data_info(reco_id) axis_labels = data_info.shape_desc + dataarray = data_info.get_dataarray() slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2 - dataarray = scanobj.get_dataarray(reco_id) if slice_axis != 2: dataarray = np.swapaxes(dataarray, slice_axis, 2) axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis] @@ -51,11 +56,11 @@ def get_data_dict(self, scan_id:int, reco_id:int|None=None): 'data_slope': data_info.slope, 'data_offset': data_info.offset, 'axis_labels': axis_labels - } + } def get_affine_dict(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None): scanobj = self.get_scan(scan_id) - affine_info = scanobj.get_affine(reco_id, get_analyzer=True) + affine_info = scanobj.get_affine_info(reco_id) subj_type = subj_type or affine_info.subj_type subj_position = subj_position or affine_info.subj_position affine = affine_info.get_affine(subj_type, subj_position) @@ -67,10 +72,22 @@ def get_affine_dict(self, scan_id:int, reco_id:int|None=None, subj_type:str|None } def get_bids_metadata(self, scan_id:int, reco_id:int|None=None, bids_recipe=None): - pars = self.get_pars(scan_id, reco_id) + pars = self.get_scan_info(scan_id, reco_id) def get_bdata(self, scan_id): - method = self.get_pars(scan_id).method + """Extract, format, and return diffusion bval and bvec""" + info = self.get_scan_info(scan_id) + bvals = np.array(info.method.get('PVM_DwEffBval')) + bvecs = np.array(info.method.get('PVM_DwGradVec').T) + # Correct for single b-vals + if np.size(bvals) < 2: + bvals = np.array([bvals]) + # Normalize bvecs + bvecs_axis = 0 + bvecs_L2_norm = np.atleast_1d(np.linalg.norm(bvecs, 2, bvecs_axis)) + bvecs_L2_norm[bvecs_L2_norm < 1e-15] = 1 + bvecs = bvecs / np.expand_dims(bvecs_L2_norm, bvecs_axis) + return bvals, bvecs def get_nifti1header(self, scan_id:int, reco_id:int|None=None): pars = self.get_pars(scan_id, reco_id) @@ -78,7 +95,8 @@ def get_nifti1header(self, scan_id:int, reco_id:int|None=None): def get_nifti1image(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None, scale_mode:ScaleMode = ScaleMode.HEADER): - data_dict = self.get_dataobj(scan_id, reco_id) + smode = scale_mode if scale_mode == ScaleMode.APPLY else ScaleMode.NONE + data_dict = self.get_dataobj(scan_id, reco_id, smode) affine_dict = self.get_affine(scan_id, reco_id, subj_type, subj_position) \ No newline at end of file From 8f58c3ea8f101396033e33f428d5f182fef442a9 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 18:22:49 -0400 Subject: [PATCH 21/38] [update] module root __all__ string element to object element --- brkraw/api/__init__.py | 4 ++-- brkraw/api/pvobj/__init__.py | 2 +- 2 files 
changed, 3 insertions(+), 3 deletions(-) diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index 39e94f0..67ede03 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,4 +1,4 @@ -from .loader import BrukerLoader +from .loader import BrkrawLoader from ..config import ConfigManager -__all__ = ['BrukerLoader', 'ConfigManager'] \ No newline at end of file +__all__ = [BrkrawLoader, ConfigManager] \ No newline at end of file diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index 887164f..917aebd 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -1,4 +1,4 @@ from .dataset import PvDataset, PvScan, PvReco from .parser import Parameter -__all__ = ["PvDataset", "PvScan", "PvReco", "Parameter"] \ No newline at end of file +__all__ = [PvDataset, PvScan, PvReco, Parameter] \ No newline at end of file From dcef35f3e13aaed1b9c747722ae601da7d98af38 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 18:23:33 -0400 Subject: [PATCH 22/38] [update] analyzer and loader polished --- brkraw/api/analyzer.py | 72 +++++++++------------------------ brkraw/api/loader.py | 91 +++++++++++++++++++++++------------------- 2 files changed, 68 insertions(+), 95 deletions(-) diff --git a/brkraw/api/analyzer.py b/brkraw/api/analyzer.py index 2d39bdd..85da5a3 100644 --- a/brkraw/api/analyzer.py +++ b/brkraw/api/analyzer.py @@ -1,5 +1,4 @@ from __future__ import annotations -import re from brkraw.api import helper import numpy as np from copy import copy @@ -22,11 +21,13 @@ 'side': ['Supine', 'Prone', 'Left', 'Right'] } -class Pars: - def __init__(self): - pass - -class ScanInfoAnalyzer: + +class BaseAnalyzer: + def vars(self): + return self.__dict__ + + +class ScanInfoAnalyzer(BaseAnalyzer): """Helps parse metadata from multiple parameter files to make it more human-readable. Args: @@ -37,10 +38,9 @@ class ScanInfoAnalyzer: NotImplementedError: If an operation is not implemented. """ def __init__(self, pvscan: 'PvScan', reco_id:int|None = None): - self.pars = self.get_pars(pvscan, reco_id) + self._set_pars(pvscan, reco_id) self.info_protocol = helper.Protocol(self).get_info() - if self.pars.visu: - self._set_attrs() + if self.visu_pars: self.info_dataarray = helper.DataArray(self).get_info() self.info_frame_group = helper.FrameGroup(self).get_info() self.info_image = helper.Image(self).get_info() @@ -49,56 +49,22 @@ def __init__(self, pvscan: 'PvScan', reco_id:int|None = None): if self.info_image['dim'] > 1: self.info_orientation = helper.Orientation(self).get_info() - @staticmethod - def get_pars(pvscan: 'PvScan', reco_id: int|None): - pars = Pars() + def _set_pars(self, pvscan: 'PvScan', reco_id: int|None): for p in ['acqp', 'method']: vals = getattr(pvscan, p) - setattr(pars, p, vals) + setattr(self, p, vals) try: - visu = pvscan.get_visu_pars(reco_id) + visu_pars = pvscan.get_visu_pars(reco_id) except FileNotFoundError: - visu = None - setattr(pars, 'visu', visu) - return pars - - def _set_attrs(self): - """ - Parse parameters and set attributes from acqp, method, and visu_pars files. - This function parses parameters from different objects (acqp, method, visu_pars) and sets corresponding attributes in the instance. - Only attributes with prefixes 'Visu', 'PVM_', 'ACQ_' are set as object attributes in snake case to follow Pythonic naming conventions. - - Args: - pvscan: The pvscan parameter. - reco_id: The reco_id parameter. 
- """ - for prefix, pars_obj in {'Visu': self.pars.visu, - 'PVM_': self.pars.method, - 'ACQ_': self.pars.acqp}.items(): - for key in pars_obj.keys(): - if prefix in key: - attr = self._camel_to_snake_case(key.replace(prefix, '')) - value = getattr(pars_obj, key) - attr = f'{prefix.lower()}{attr}' if '_' in prefix else f'{prefix.lower()}_{attr}' - setattr(self, attr, value) - - @staticmethod - def _camel_to_snake_case(input_string: str): - matches = re.finditer(r'[A-Z]+[^A-Z]*', input_string) - output_string = [] - for m in matches: - string = m.group() - is_upper = [bool(char.isupper()) for char in string] - if sum(is_upper) > 1 and not all(is_upper): - idx_for_space = is_upper.index(False) - output_string.append(f'{string[:idx_for_space-1]}_{string[idx_for_space-1:]}'.lower()) - else: - output_string.append(string.lower()) - return '_'.join(output_string) + visu_pars = None + setattr(self, 'visu_pars', visu_pars) def __dir__(self): return [attr for attr in self.__dict__.keys() if 'info_' in attr] + def get(self, key): + return getattr(self, key) if key in self.__dir__() else None + class AffineAnalyzer: def __init__(self, infoobj: 'ScanInfo'): @@ -106,7 +72,7 @@ def __init__(self, infoobj: 'ScanInfo'): if infoobj.image['dim'] == 2: xr, yr = infoobj.image['resolution'] self.resolution = [(xr, yr, zr) for zr in infoobj.slicepack['slice_distances_each_pack']] - elif self.info.image['dim'] == 3: + elif infoobj.image['dim'] == 3: self.resolution = infoobj.image['resolution'][:] else: raise NotImplementedError @@ -209,7 +175,7 @@ def _inspect_subj_info(subj_pose, subj_type): assert subj_type in SUBJTYPE, 'Invalid subject type' -class DataArrayAnalyzer: +class DataArrayAnalyzer(BaseAnalyzer): def __init__(self, infoobj: 'ScanInfo', fileobj: BufferedReader|ZipExtFile): infoobj = copy(infoobj) self._parse_info(infoobj) diff --git a/brkraw/api/loader.py b/brkraw/api/loader.py index 52c3d7a..c150481 100644 --- a/brkraw/api/loader.py +++ b/brkraw/api/loader.py @@ -1,15 +1,12 @@ from __future__ import annotations -import sys import ctypes from typing import Dict -from .analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer -from ..config import ConfigManager +from .analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer from .pvobj import PvDataset, PvScan - -class BrukerLoader(PvDataset): +class BrkrawLoader(PvDataset): def __init__(self, path): - super().__init__(path, **ConfigManager().get('spec')) + super().__init__(path) self._parse_header() def get_scan(self, scan_id, reco_id=None, analyze=True): @@ -21,36 +18,40 @@ def get_scan(self, scan_id, reco_id=None, analyze=True): loader_address=id(self), analyze=analyze) def _parse_header(self) -> (Dict | None): - if not len(self._contents.keys()): + if not self.contents or 'subject' not in self.contents['files']: self.header = None return - contents = self._contents if 'files' in self._contents else self._contents[list(self._contents.keys())[0]] - if subj := getattr(self, 'subject') if 'subject' in contents['files'] else None: - subj_header = getattr(subj, 'header') if subj else None - if title := subj_header['TITLE'] if subj_header else None: - pvspec = title.split(',')[-1].strip() if 'ParaVision' in title else "ParaVision < 6" - if "360" in title: - entry, position = getattr(subj, "SUBJECT_study_instrument_position").split('_')[:2] - else: - entry = getattr(subj, "SUBJECT_entry").split('_')[-1] - position = getattr(subj, "SUBJECT_position").split('_')[-1] + subj = self.subject + subj_header = getattr(subj, 
'header') if subj.is_parameter() else None + if title := subj_header['TITLE'] if subj_header else None: + self.header = {k.replace("SUBJECT_",""):v for k, v in subj.parameters.items() if k.startswith("SUBJECT")} + self.header['sw_version'] = title.split(',')[-1].strip() if 'ParaVision' in title else "ParaVision < 6" + @property + def avail(self): + return super().avail - self.header = { - 'version': pvspec, - 'user_account': subj_header['OWNER'], - 'subject_entry': entry, - 'subject_position': position, - } - else: - self.header = None - - def info(self, io_handler=None): - io_handler = io_handler or sys.stdout - + def info(self): + """output all analyzed information""" + info = {'header': None, + 'scans': {}} + if header := self.header: + info['header'] = header + for scan_id in self.avail: + info['scans'][scan_id] = {} + scanobj = self.get_scan(scan_id) + for reco_id in scanobj.avail: + info['scans'][scan_id][reco_id] = scanobj.get_info(reco_id).vars() + return info -class ScanInfo: + +class ScanInfo(BaseAnalyzer): def __init__(self): - pass + self.warns = [] + + @property + def num_warns(self): + return len(self.warns) + class ScanObj(PvScan): def __init__(self, pvscan: 'PvScan', reco_id: int|None = None, @@ -58,9 +59,7 @@ def __init__(self, pvscan: 'PvScan', reco_id: int|None = None, super().__init__(pvscan._scan_id, (pvscan._rootpath, pvscan._path), pvscan._contents, - pvscan._recos, - binary_files = pvscan._binary_files, - parameter_files = pvscan._parameter_files) + pvscan._recos) self.reco_id = reco_id self._loader_address = loader_address @@ -82,26 +81,34 @@ def get_info(self, reco_id, get_analyzer:bool=False): if 'info_' in attr_name: attr_vals = getattr(analysed, attr_name) setattr(infoobj, attr_name.replace('info_', ''), attr_vals) + if attr_vals and attr_vals['warns']: + infoobj.warns.extend(attr_vals['warns']) return infoobj - def get_affine(self, reco_id:int|None = None, subj_type:str|None = None, subj_position:str|None = None, get_analyzer=False): + def get_affine_info(self, reco_id:int|None = None): if reco_id: info = self.get_info(reco_id) else: info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) - analyzer = AffineAnalyzer(info) - return analyzer if get_analyzer else analyzer.get_affine(subj_type, subj_position) + return AffineAnalyzer(info) - def get_dataarray(self, reco_id: int|None = None, get_analyzer=False): + def get_data_info(self, reco_id: int|None = None): reco_id = reco_id or self.avail[0] recoobj = self.get_reco(reco_id) - datafiles = [f for f in recoobj._contents['files'] if f in recoobj._binary_files] + datafiles = [f for f in recoobj._contents['files'] if f == '2dseq'] if not len(datafiles): - raise FileNotFoundError('no binary file') + raise FileNotFoundError("The required file '2dseq' does not exist. 
" + "Please check the dataset and ensure the file is in the expected location.") fileobj = recoobj._open_as_fileobject(datafiles.pop()) info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) - analyzer = DataArrayAnalyzer(info, fileobj) - return analyzer if get_analyzer else analyzer.get_dataarray() + return DataArrayAnalyzer(info, fileobj) + + def get_affine(self, reco_id:int|None = None, + subj_type:str|None = None, subj_position:str|None = None): + return self.get_affine_info(reco_id).get_affine(subj_type, subj_position) + + def get_dataarray(self, reco_id: int|None = None): + return self.get_data_info(reco_id).get_dataarray() def retrieve_pvscan(self): if self._pvscan_address: From f307b795958e6e8851f4c694beb005271bdba8eb Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 18:37:52 -0400 Subject: [PATCH 23/38] [update] helper polish --- brkraw/api/helper/cycle.py | 2 +- brkraw/api/helper/slicepack.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/brkraw/api/helper/cycle.py b/brkraw/api/helper/cycle.py index e1afd71..f753ade 100644 --- a/brkraw/api/helper/cycle.py +++ b/brkraw/api/helper/cycle.py @@ -22,7 +22,7 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): scan_time = analobj.visu_pars.get("VisuAcqScanTime") or 0 fg_info = analobj.get('info_frame_group') or FrameGroup(analobj).get_info() fg_not_slice = [] - if fg_info != None and fg_info['type'] != None: + if fg_info['type'] != None: fg_not_slice.extend([fg_info['shape'][id] for id, fg in enumerate(fg_info['id']) if not re.search('slice', fg, re.IGNORECASE)]) self.num_frames = np.prod(fg_not_slice) if len(fg_not_slice) else 1 diff --git a/brkraw/api/helper/slicepack.py b/brkraw/api/helper/slicepack.py index 1d03e56..36b2b74 100644 --- a/brkraw/api/helper/slicepack.py +++ b/brkraw/api/helper/slicepack.py @@ -23,7 +23,7 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): fg_info = analobj.get("info_frame_group") or FrameGroup(analobj).get_info() img_info = analobj.get("info_image") or Image(analobj).get_info() - if fg_info is None or fg_info['type'] is None: + if fg_info['type'] is None: num_slice_packs = 1 num_slices_each_pack = [visu_pars.get("VisuCoreFrameCount")] slice_distances_each_pack = [visu_pars.get("VisuCoreFrameThickness")] \ From 68c0cc54b3bb2783d9eee68a658a7d984e6fab18 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 18:39:19 -0400 Subject: [PATCH 24/38] [update] split dataset.py into multiple files to enhance code readability --- brkraw/api/pvobj/__init__.py | 6 +- brkraw/api/pvobj/base.py | 170 +++++++++++ brkraw/api/pvobj/dataset.py | 536 --------------------------------- brkraw/api/pvobj/parameters.py | 160 ++++++++++ brkraw/api/pvobj/parser.py | 143 --------- brkraw/api/pvobj/pvdataset.py | 190 ++++++++++++ brkraw/api/pvobj/pvreco.py | 57 ++++ brkraw/api/pvobj/pvscan.py | 127 ++++++++ 8 files changed, 708 insertions(+), 681 deletions(-) create mode 100644 brkraw/api/pvobj/base.py delete mode 100755 brkraw/api/pvobj/dataset.py create mode 100644 brkraw/api/pvobj/parameters.py create mode 100755 brkraw/api/pvobj/pvdataset.py create mode 100644 brkraw/api/pvobj/pvreco.py create mode 100644 brkraw/api/pvobj/pvscan.py diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index 917aebd..c3bba62 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -1,4 +1,6 @@ -from .dataset import PvDataset, PvScan, PvReco -from .parser import Parameter +from .pvdataset import PvDataset +from .pvscan import PvScan +from 
.pvreco import PvReco +from .parameters import Parameter __all__ = [PvDataset, PvScan, PvReco, Parameter] \ No newline at end of file diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py new file mode 100644 index 0000000..4ecef93 --- /dev/null +++ b/brkraw/api/pvobj/base.py @@ -0,0 +1,170 @@ +import os +import zipfile +from collections import OrderedDict +from collections import defaultdict +from .parameters import Parameter + +class BaseMethods: + """ + The `BaseMethods` class provides internal method for PvObjects. + + Explanation: + This class contains various methods for handling files and directories, including fetching directory structure, + fetching zip file contents, opening files as file objects or strings, retrieving values associated with keys, and setting configuration options. + + Args: + **kwargs: Keyword arguments for configuration options. + + Returns: + None + """ + _scan_id = None + _reco_id = None + _path = None + _rootpath = None + _contents = None + + @staticmethod + def _fetch_dir(path): + """Searches for directories and files in a given directory and returns the directory structure. + + Args: + path: The path to the directory. + + Returns: + dict: A dictionary representing the directory structure. + The keys are the relative paths of the directories, and the values are dictionaries with the following keys: + - 'dirs': A list of directory names. + - 'files': A list of file names. + - 'file_indexes': An empty list. + """ + contents = OrderedDict() + abspath = os.path.abspath(path) + for dirpath, dirnames, filenames in os.walk(abspath): + normalized_dirpath = os.path.normpath(dirpath) + relative_path = os.path.relpath(normalized_dirpath, abspath) + contents[relative_path] = {'dirs': dirnames, 'files': filenames, 'file_indexes': []} + return contents + + @staticmethod + def _fetch_zip(path): + """Searches for files in a zip file and returns the directory structure and file information. + + Args: + path: The path to the zip file. + + Returns: + dict: A dictionary representing the directory structure and file information. + The keys are the directory paths, and the values are dictionaries with the following keys: + - 'dirs': A set of directory names. + - 'files': A list of file names. + - 'file_indexes': A list of file indexes. + """ + with zipfile.ZipFile(path) as zip_file: + contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': []}) + for i, item in enumerate(zip_file.infolist()): + if not item.is_dir(): + dirpath, filename = os.path.split(item.filename) + contents[dirpath]['files'].append(filename) + contents[dirpath]['file_indexes'].append(i) + while dirpath: + dirpath, dirname = os.path.split(dirpath) + if dirname: + contents[dirpath]['dirs'].add(dirname) + return contents + + def _open_as_fileobject(self, key): + """Opens a file object for the given key. + + Args: + key: The key to identify the file. + + Returns: + file object: The opened file object. + + Raises: + ValueError: If the key does not exist in the files. 
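+
+        Example (illustrative sketch; `scan` stands for any PvObject whose
+        contents include an 'acqp' file):
+            >>> fileobj = scan._open_as_fileobject('acqp')
+            >>> first_line = fileobj.readline()  # raw bytes of the first line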
+        """
+        rootpath = self._rootpath or self._path
+        if not self.contents:
+            raise ValueError(f'no contents found in "{self._path}".')
+        files = self.contents.get('files')
+        path_list = [*([str(self._scan_id)] if self._scan_id else []), *(['pdata', str(self._reco_id)] if self._reco_id else []), key]
+
+        if key not in files:
+            if file_indexes := self.contents.get('file_indexes'):
+                rel_path = self._path
+            else:
+                rel_path = os.path.join(*path_list)
+            raise ValueError(f'file does not exist in "{rel_path}".\n [{", ".join(files)}]')
+
+        if file_indexes := self.contents.get('file_indexes'):
+            with zipfile.ZipFile(rootpath) as zf:
+                idx = file_indexes[files.index(key)]
+                return zf.open(zf.namelist()[idx])
+        else:
+            path_list.insert(0, rootpath)
+            path = os.path.join(*path_list)
+            return open(path, 'rb')
+
+    def _open_as_string(self, key):
+        """Opens a file as binary, decodes it as UTF-8, and splits it into lines.
+
+        Args:
+            key: The key to identify the file.
+
+        Returns:
+            list: The lines of the file as strings.
+        """
+        with self._open_as_fileobject(key) as f:
+            string = f.read().decode('UTF-8').split('\n')
+        return string
+
+    def __getitem__(self, key):
+        """Returns the value associated with the given key.
+
+        Args:
+            key: The key to retrieve the value.
+
+        Returns:
+            object: The value associated with the key.
+
+        Raises:
+            KeyError: If the key is not found.
+        """
+        return self.__getattr__(key)
+
+    def __getattr__(self, key):
+        """
+        Get attribute by name.
+
+        Args:
+            key (str): The name of the attribute to retrieve.
+
+        Returns:
+            Parameter or file object: The parameter object if the key is found in parameter files, otherwise the file object.
+
+        Examples:
+            obj = Dataset()
+            param = obj.some_key  # Returns a Parameter object or file object.
+        """
+        key = key[1:] if key.startswith('_') else key  # strip a leading underscore so keys like '_acqp' still resolve to the 'acqp' file
+
+        if file := [f for f in self.contents['files'] if (f == key or f.replace('.', '_') == key)]:
+            fileobj = self._open_as_fileobject(file.pop())
+            if self._is_binary(fileobj):
+                return fileobj
+            lines = fileobj.read().decode('UTF-8').split('\n')  # read once; the stream cannot be re-read afterwards
+            par = Parameter(lines, name=key, scan_id=self._scan_id, reco_id=self._reco_id)
+            return par if par.is_parameter() else lines
+        raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{key}'")
+
+    @property
+    def contents(self):
+        return self._contents
+
+    @staticmethod
+    def _is_binary(fileobj, bytes=512):
+        block = fileobj.read(bytes)
+        fileobj.seek(0)
+        return b'\x00' in block
\ No newline at end of file
diff --git a/brkraw/api/pvobj/dataset.py b/brkraw/api/pvobj/dataset.py
deleted file mode 100755
index 7b1275b..0000000
--- a/brkraw/api/pvobj/dataset.py
+++ /dev/null
@@ -1,536 +0,0 @@
-import os
-import re
-import zipfile
-from collections import OrderedDict
-from collections import defaultdict
-from .parser import Parameter
-
-
-class BaseMethods:
-    """
-    The `BaseMethods` class provides internal method for PvObjects.
-
-    Explanation:
-    This class contains various methods for handling files and directories, including fetching directory structure,
-    fetching zip file contents, opening files as file objects or strings, retrieving values associated with keys, and setting configuration options.
-
-    Args:
-        **kwargs: Keyword arguments for configuration options.
-
-    Returns:
-        None
-    """
-    _scan_id = None
-    _reco_id = None
-    _path = None
-    _rootpath = None
-    _contents = None
-
-    @staticmethod
-    def _fetch_dir(path):
-        """Searches for directories and files in a given directory and returns the directory structure.
- - Args: - path: The path to the directory. - - Returns: - dict: A dictionary representing the directory structure. - The keys are the relative paths of the directories, and the values are dictionaries with the following keys: - - 'dirs': A list of directory names. - - 'files': A list of file names. - - 'file_indexes': An empty list. - """ - contents = OrderedDict() - abspath = os.path.abspath(path) - for dirpath, dirnames, filenames in os.walk(abspath): - normalized_dirpath = os.path.normpath(dirpath) - relative_path = os.path.relpath(normalized_dirpath, abspath) - contents[relative_path] = {'dirs': dirnames, 'files': filenames, 'file_indexes': []} - return contents - - @staticmethod - def _fetch_zip(path): - """Searches for files in a zip file and returns the directory structure and file information. - - Args: - path: The path to the zip file. - - Returns: - dict: A dictionary representing the directory structure and file information. - The keys are the directory paths, and the values are dictionaries with the following keys: - - 'dirs': A set of directory names. - - 'files': A list of file names. - - 'file_indexes': A list of file indexes. - """ - with zipfile.ZipFile(path) as zip_file: - contents = defaultdict(lambda: {'dirs': set(), 'files': [], 'file_indexes': []}) - for i, item in enumerate(zip_file.infolist()): - if not item.is_dir(): - dirpath, filename = os.path.split(item.filename) - contents[dirpath]['files'].append(filename) - contents[dirpath]['file_indexes'].append(i) - while dirpath: - dirpath, dirname = os.path.split(dirpath) - if dirname: - contents[dirpath]['dirs'].add(dirname) - return contents - - def _open_as_fileobject(self, key): - """Opens a file object for the given key. - - Args: - key: The key to identify the file. - - Returns: - file object: The opened file object. - - Raises: - ValueError: If the key does not exist in the files. - """ - rootpath = self._rootpath or self._path - if not self.contents: - raise ValueError(f'file not exists in "{rel_path}".') - files = self.contents.get('files') - path_list = [*([str(self._scan_id)] if self._scan_id else []), *(['pdata', str(self._reco_id)] if self._reco_id else []), key] - - if key not in files: - if file_indexes := self.contents.get('file_indexes'): - rel_path = self._path - else: - rel_path = os.path.join(*path_list) - raise ValueError(f'file not exists in "{rel_path}".\n [{", ".join(files)}]') - - if file_indexes := self.contents.get('file_indexes'): - with zipfile.ZipFile(rootpath) as zf: - idx = file_indexes[files.index(key)] - return zf.open(zf.namelist()[idx]) - else: - path_list.insert(0, rootpath) - path = os.path.join(*path_list) - return open(path, 'rb') - - def _open_as_string(self, key): - """Opens a file as binary, decodes it as UTF-8, and splits it into lines. - - Args: - key: The key to identify the file. - - Returns: - list: The lines of the file as strings. - """ - with self._open_as_fileobject(key) as f: - string = f.read().decode('UTF-8').split('\n') - return string - - def __getitem__(self, key): - """Returns the value associated with the given key. - - Args: - key: The key to retrieve the value. - - Returns: - object: The value associated with the key. - - Raises: - KeyError: If the key is not found. - """ - return self.__getattr__(key) - - def __getattr__(self, key): - """ - Get attribute by name. - - Args: - key (str): The name of the attribute to retrieve. - - Returns: - Parameter or file object: The parameter object if the key is found in parameter files, otherwise the file object. 
- - Examples: - obj = Dataset() - param = obj.some_key # Returns a Parameter object or file object. - """ - key = key[1:] if key.startswith('_') else key #new code - - if file := [f for f in self.contents['files'] if (f == key or f.replace('.', '_') == key)]: - fileobj = self._open_as_fileobject(file.pop()) - if self._is_binary(fileobj): - return fileobj - par = Parameter(fileobj.read().decode('UTF-8').split('\n'), - name=key, scan_id=self._scan_id, reco_id=self._reco_id) - return par if par.header else fileobj.read().decode('UTF-8').split('\n') - raise AttributeError(f"'{self.__class__.__name__}' object has no attribute '{key}'") - - @property - def contents(self): - return self._contents - - @staticmethod - def _is_binary(fileobj, bytes=512): - block = fileobj.read(bytes) - fileobj.seek(0) - return b'\x00' in block - - -class PvDataset(BaseMethods): - """ - A class representing a PvDataset object. - - Inherits from BaseMethods. - - Attributes: - is_compressed (bool): Indicates if the dataset is compressed. - - Methods: - get_scan(scan_id): Get a specific scan object by ID. - - Properties: - path (str): The path of the object. - avail (list): A list of available scans. - contents (dict): A dictionary of pvdataset contents. - """ - def __init__(self, path, debug=False): - """ - Initialize the object with the given path and optional debug flag. - - Args: - path: The path to initialize the object with. - debug: A flag indicating whether debug mode is enabled. - **kwargs: Additional keyword arguments. - - Raises: - Any exceptions raised by _check_dataset_validity or _construct methods. - - Notes: - If 'pvdataset' is present in kwargs, it will be used to initialize the object via super(). - - Examples: - obj = ClassName(path='/path/to/dataset', debug=True) - """ - - if not debug: - self._check_dataset_validity(path) - self._construct() - - # internal method - def _check_dataset_validity(self, path): - """ - Checks the validity of a given dataset path. - - Note: This method only checks the validity of the dataset to be fetched using `fetch_dir` and `fetch_zip`, - and does not check the validity of a `PvDataset`. - - Args: - path (str): The path to check. - - Raises: - FileNotFoundError: If the path does not exist. - ValueError: If the path is not a directory or a file, or if it does not meet the required criteria. - - Returns: - None - """ - self._path = os.path.abspath(path) - if not os.path.exists(self._path): - raise FileNotFoundError(f"The path '{self._path}' does not exist.") - if os.path.isdir(self._path): - self._contents = self._fetch_dir(self._path) - self.is_compressed = False - elif os.path.isfile(self._path) and zipfile.is_zipfile(self._path): - self._contents = self._fetch_zip(self._path) - self.is_compressed = True - else: - raise ValueError(f"The path '{self._path}' does not meet the required criteria.") - - def _construct(self): # sourcery skip: low-code-quality - """ - Constructs the object by organizing the contents. - - This method constructs the object by organizing the contents based on the provided directory structure. - It iterates over the sorted contents and updates the `_scans` and `_backup` dictionaries accordingly. - After processing, it removes the processed paths from the `_contents` dictionary. - - Args: - **kwargs: keyword argument for datatype specification. 
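`_check_dataset_validity`, moved verbatim into `pvdataset.py` later in this patch, is a three-way dispatch that decides which fetcher builds `_contents`. A hedged standalone equivalent of the decision:

```python
import os
import zipfile

def classify_dataset(path):
    """Return ('dir' | 'zip', abspath) the way PvDataset decides is_compressed."""
    abspath = os.path.abspath(path)
    if not os.path.exists(abspath):
        raise FileNotFoundError(f"The path '{abspath}' does not exist.")
    if os.path.isdir(abspath):
        return 'dir', abspath      # raw ParaVision study folder -> _fetch_dir
    if os.path.isfile(abspath) and zipfile.is_zipfile(abspath):
        return 'zip', abspath      # compressed study -> _fetch_zip, is_compressed=True
    raise ValueError(f"The path '{abspath}' does not meet the required criteria.")
```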
- - Returns: - None - """ - self._scans = OrderedDict() - self._backup = OrderedDict() - - to_remove = [] - for path, contents in self._contents.items(): - if not path: - self._root = contents - to_remove.append(path) - elif not contents['files']: - to_remove.append(path) - elif matched := re.match(r'(?:.*/)?(\d+)/(\D+)/(\d+)$', path) or re.match(r'(?:.*/)?(\d+)$', path): - to_remove.append(self._process_childobj(matched, (path, contents))) - self._clear_contents(to_remove) - - def _process_childobj(self, matched, item): - """ - The `_process_childobj` method processes a child object based on the provided arguments and updates the internal state of the object. - - Args: - matched: A `re.Match` object representing the matched pattern. - item: A tuple containing the path and contents of the child object. - **kwargs: Additional keyword arguments. - - Returns: - str: The path of the processed child object. - - Raises: - None. - - Examples: - # Example usage of _process_childobj - matched = re.match(pattern, input_string) - item = ('path/to/child', {'dirs': set(), 'files': [], 'file_indexes': []}) - result = obj._process_childobj(matched, item, pvscan={'binary_files': [], 'parameter_files': ['method', 'acqp', 'visu_pars']}) - """ - path, contents = item - scan_id = int(matched.group(1)) - if scan_id not in self._scans: - self._scans[scan_id] = PvScan(scan_id, (self.path, path)) - if len(matched.groups()) == 1 and 'pdata' in contents['dirs']: - self._scans[scan_id].update(contents) - elif len(matched.groups()) == 3 and matched.group(2) == 'pdata': - reco_id = int(matched.group(3)) - self._scans[scan_id].set_reco(path, reco_id, contents) - else: - self._backup[path] = contents - return path - - @property - def contents(self): - for _, contents in super().contents.items(): - if 'subject' in contents['files']: - return contents - - def _clear_contents(self, to_be_removed): - for path in to_be_removed: - try: - del self._contents[path] - except KeyError: - self._dummy.append(path) - - @property - def path(self): - """ - Gets the path of the object. - - Returns: - str: The path of the object. - """ - return self._path - - @property - def avail(self): - """ - A property representing the available scans. - - Returns: - list: A list of available scans. - """ - return sorted(list(self._scans)) - - def get_scan(self, scan_id): - """ - Get a specific scan object by ID. - - Args: - scan_id (int): The ID of the scan object to retrieve. - - Returns: - object: The specified scan object. - - Raises: - KeyError: If the specified scan ID does not exist. - """ - return self._scans[scan_id] - - def __dir__(self): - return super().__dir__() + ['path', 'avail', 'get_scan'] - - - -class PvScan(BaseMethods): - """ - A class representing a PvScan object. - - Inherits from BaseMethods. - - Attributes: - is_compressed (bool): Indicates if the dataset is compressed. - - Methods: - update(contents): Update the contents of the dataset. - set_reco(path, reco_id, contents): Set a reco object with the specified path, ID, and contents. - get_reco(reco_id): Get a specific reco object by ID. - - Properties: - path (str): The path. - avail (list): A list of available items. - contents (dict): A dictionary of pvscan contents. - """ - def __init__(self, scan_id, pathes, contents=None, recos=None): - """ - Initialize a Dataset object. - - Args: - scan_id (int): The ID of the scan. - pathes (tuple): A tuple containing the root path and the path. - contents (list, optional): The initial contents of the dataset. Defaults to None. 
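The two regular expressions in `_construct` drive the whole discovery step: a relative path ending in `<digits>` is a scan folder, and `<digits>/<non-digits>/<digits>` is a reco folder (only `pdata` is accepted as the middle group by `_process_childobj`; anything else lands in `_backup`). What each pattern captures, with an illustrative study layout:

```python
import re

reco_ptrn = re.compile(r'(?:.*/)?(\d+)/(\D+)/(\d+)$')
scan_ptrn = re.compile(r'(?:.*/)?(\d+)$')

for path in ['20240220_093000_mouse1/3', '20240220_093000_mouse1/3/pdata/1']:
    if m := reco_ptrn.match(path):          # tried first, as in _construct
        print('reco', m.groups())           # ('3', 'pdata', '1') -> scan 3, reco 1
    elif m := scan_ptrn.match(path):
        print('scan', m.groups())           # ('3',) -> scan 3
```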
- recos (dict, optional): A dictionary of reco objects. Defaults to None. - - Attributes: - _scan_id (int): The ID of the scan. - _rootpath (str): The root path. - _path (str): The path. - _recos (OrderedDict): An ordered dictionary of reco objects. - - Methods: - update(contents): Update the contents of the dataset. - """ - self._scan_id = scan_id - self._rootpath, self._path = pathes - self.update(contents) - self._recos = OrderedDict(recos) if recos else OrderedDict() - - def update(self, contents): - """ - Update the contents of the dataset. - - Args: - contents (list): The new contents of the dataset. - - Returns: - None - """ - if contents: - self.is_compressed = True if contents.get('file_indexes') else False - self._contents = contents - - def set_reco(self, path, reco_id, contents): - """ - Set a reco object with the specified path, ID, and contents. - - Args: - path (str): The path of the reco object. - reco_id (int): The ID of the reco object. - contents (list): The contents of the reco object. - - Returns: - None - """ - self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents) - - def get_reco(self, reco_id): - """ - Get a specific reco object by ID. - - Args: - reco_id (int): The ID of the reco object to retrieve. - - Returns: - object: The specified reco object. - - Raises: - KeyError: If the specified reco ID does not exist. - """ - return self._recos[reco_id] - - def get_visu_pars(self, reco_id=None): - if reco_id: - return getattr(self.get_reco(reco_id), 'visu_pars') - elif 'visu_pars' in self.contents['files']: - return getattr(self, 'visu_pars') - elif len(self.avail): - recoobj = self.get_reco(self.avail[0]) - if 'visu_pars' not in recoobj.contents['files']: - raise FileNotFoundError - else: - return getattr(recoobj, 'visu_pars') - else: - raise FileNotFoundError - - @property - def path(self): - """ - A property representing the path. - - Returns: - str: The path. - """ - path = (self._rootpath, self._path) - if self.is_compressed: - return path - return os.path.join(*path) - - @property - def avail(self): - """ - A property representing the available items. - - Returns: - list: A list of available items. - """ - return sorted(list(self._recos)) - - -class PvReco(BaseMethods): - """ - A class representing a PvReco object. - - Inherits from BaseMethods. - - Attributes: - is_compressed (bool): Indicates if the dataset is compressed. - - Args: - scan_id (int): The ID of the scan. - reco_id (int): The ID of the reconstruction. - pathes (tuple): A tuple containing the root path and the path. - contents (list): A list of contents. - - Properties: - path (str): The path. - """ - def __init__(self, scan_id, reco_id, pathes, contents): - """ - Initialize a Dataset object. - - Args: - scan_id (int): The ID of the scan. - reco_id (int): The ID of the reconstruction. - pathes (tuple): A tuple containing the root path and the path. - contents (list): A list of contents. - - Attributes: - _scan_id (int): The ID of the scan. - _reco_id (int): The ID of the reconstruction. - _rootpath (str): The root path. - _path (str): The path. - _contents (list): The list of contents. - """ - self._scan_id = scan_id - self._reco_id = reco_id - self._rootpath, self._path = pathes - self._contents = contents - self.is_compressed = True if contents.get('file_indexes') else False - - @property - def path(self): - """ - A property representing the path. - - Returns: - str: The path. 
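`get_visu_pars` resolves the parameter file in three steps: an explicit `reco_id` wins, then a scan-level `visu_pars` file, then the first available reco; every miss ends in `FileNotFoundError`. Typical use, with a placeholder path and illustrative IDs:

```python
from brkraw.api.pvobj import PvDataset

dataset = PvDataset('/path/to/study')   # placeholder
scan = dataset.get_scan(3)              # assuming scan 3 exists

visu = scan.get_visu_pars(2)            # 1) explicit reco_id
visu = scan.get_visu_pars()             # 2) scan-level 'visu_pars' if present,
                                        # 3) else the first reco in scan.avail
```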
- """ - path = (self._rootpath, self._path) - if self.is_compressed: - return path - return os.path.join(*path) diff --git a/brkraw/api/pvobj/parameters.py b/brkraw/api/pvobj/parameters.py new file mode 100644 index 0000000..8a8386c --- /dev/null +++ b/brkraw/api/pvobj/parameters.py @@ -0,0 +1,160 @@ +import re +import numpy as np +from collections import OrderedDict +from .parser import Parser, ptrn_comment, PARAMETER, HEADER + + +class Parameter: + """ + Paravision Parameter object + + This class extends the Parser class and provides methods to initialize the object with a stringlist of parameter dictionaries, retrieve the parameters and headers, and process the contents of the data. + + Args: + stringlist: A list of strings containing the parameter dictionaries. + + Examples: + >>> stringlist = ["param1", "param2"] + >>> parameter = Parameter(stringlist) + + Attributes: + parameters (property): Get the parameters of the data. + headers (property): Get the headers of the data. + + Methods: + _process_contents: Process the contents of the data based on the given parameters. + _set_param: Set the parameters and headers based on the given data. + """ + def __init__(self, stringlist, name, scan_id=None, reco_id=None): + """ + Initialize the Parameter object with the given stringlist, name, scan_id, and reco_id. + + Args: + stringlist: A list of strings containing the parameter dictionaries. + name: The name of the Parser object. + scan_id: The scan ID associated with the Parser object. + reco_id: The reco ID associated with the Parser object. + + Examples: + >>> stringlist = ["param1", "param2"] + >>> name = "MyParser" + >>> scan_id = 12345 + >>> reco_id = 67890 + >>> parser = Parser(stringlist, name, scan_id, reco_id) + """ + self._name = name + self._repr_items = [] + if scan_id: + self._repr_items.append(f'scan_id={scan_id}') + if reco_id: + self._repr_items.append(f'reco_id={reco_id}') + self._set_param(*Parser.load_param(stringlist)) + + @property + def name(self): + if '_' in self._name: + return ''.join([s.capitalize() for s in self._name.split('_')]) + return self._name.capitalize() + + @property + def parameters(self): + """ + Get the parameters of the data. + + Returns: + OrderedDict: The parameters of the data. + + Examples: + This property can be accessed directly on an instance of the class to retrieve the parameters. + """ + return self._parameters + + @property + def header(self): + """ + Get the headers of the data. + + Returns: + OrderedDict: The headers of the data. + + Examples: + This property can be accessed directly on an instance of the class to retrieve the headers. + """ + return self._header + + def _process_contents(self, contents, addr, addr_diff, index, value): + """ + Process the contents of the data based on the given parameters. + + Args: + contents: The contents of the data. + addr: The address of the current parameter. + addr_diff: The difference in addresses between parameters. + index: The index of the current parameter. + value: The value of the current parameter. + + Returns: + tuple: A tuple containing the processed data and its shape. + + Examples: + This method is intended to be called internally within the class and does not have direct usage examples. 
+ """ + if addr_diff[index] > 1: + c_lines = contents[(addr + 1):(addr + addr_diff[index])] + data = " ".join([line.strip() for line in c_lines if not re.match(ptrn_comment, line)]) + return (data, value) if data else (Parser.convert_string_to(value), -1) + return Parser.convert_string_to(value), -1 + + def _set_param(self, params, param_addr, contents): + """ + Set the parameters and headers based on the given data. + + Args: + params: A list of parameter information. + param_addr: The addresses of the parameters. + contents: The contents of the data. + + Raises: + ValueError: If an invalid dtype is encountered. + + Examples: + This method is intended to be called internally within the class and does not have direct usage examples. + """ + addr_diff = np.diff(param_addr) + self._contents = contents + self._header = OrderedDict() + self._parameters = OrderedDict() + for index, addr in enumerate(param_addr[:-1]): + dtype, key, value = params[addr] + data, shape = self._process_contents(contents, addr, addr_diff, index, value) + if dtype is PARAMETER: + self._parameters[key] = Parser.convert_data_to(data, shape) + elif dtype is HEADER: + self._header[key] = data + else: + raise ValueError("Invalid dtype encountered in '_set_param'") + + def __getitem__(self, key): + return self.parameters[key] + + def __getattr__(self, key): + return self.parameters[key] + + def __repr__(self): + return f"{self.name}({', '.join(self._repr_items)})" + + def keys(self): + return self.parameters.keys() + + def values(self): + return self.parameters.values() + + def get(self, key): + if key in self.keys(): + return self.parameters[key] + else: + return None + + def is_parameter(self): + return True if self.header else False + \ No newline at end of file diff --git a/brkraw/api/pvobj/parser.py b/brkraw/api/pvobj/parser.py index 901269f..59225b1 100755 --- a/brkraw/api/pvobj/parser.py +++ b/brkraw/api/pvobj/parser.py @@ -317,146 +317,3 @@ def convert_data_to(data, shape): elif isinstance(data, str): data = Parser.convert_string_to(data) return data - - -class Parameter: - """ - Paravision Parameter object - - This class extends the Parser class and provides methods to initialize the object with a stringlist of parameter dictionaries, retrieve the parameters and headers, and process the contents of the data. - - Args: - stringlist: A list of strings containing the parameter dictionaries. - - Examples: - >>> stringlist = ["param1", "param2"] - >>> parameter = Parameter(stringlist) - - Attributes: - parameters (property): Get the parameters of the data. - headers (property): Get the headers of the data. - - Methods: - _process_contents: Process the contents of the data based on the given parameters. - _set_param: Set the parameters and headers based on the given data. - """ - def __init__(self, stringlist, name, scan_id=None, reco_id=None): - """ - Initialize the Parameter object with the given stringlist, name, scan_id, and reco_id. - - Args: - stringlist: A list of strings containing the parameter dictionaries. - name: The name of the Parser object. - scan_id: The scan ID associated with the Parser object. - reco_id: The reco ID associated with the Parser object. 
- - Examples: - >>> stringlist = ["param1", "param2"] - >>> name = "MyParser" - >>> scan_id = 12345 - >>> reco_id = 67890 - >>> parser = Parser(stringlist, name, scan_id, reco_id) - """ - self._name = name - self._repr_items = [] - if scan_id: - self._repr_items.append(f'scan_id={scan_id}') - if reco_id: - self._repr_items.append(f'reco_id={reco_id}') - self._set_param(*Parser.load_param(stringlist)) - - @property - def name(self): - if '_' in self._name: - return ''.join([s.capitalize() for s in self._name.split('_')]) - return self._name.capitalize() - - @property - def parameters(self): - """ - Get the parameters of the data. - - Returns: - OrderedDict: The parameters of the data. - - Examples: - This property can be accessed directly on an instance of the class to retrieve the parameters. - """ - return self._parameters - - @property - def header(self): - """ - Get the headers of the data. - - Returns: - OrderedDict: The headers of the data. - - Examples: - This property can be accessed directly on an instance of the class to retrieve the headers. - """ - return self._header - - def _process_contents(self, contents, addr, addr_diff, index, value): - """ - Process the contents of the data based on the given parameters. - - Args: - contents: The contents of the data. - addr: The address of the current parameter. - addr_diff: The difference in addresses between parameters. - index: The index of the current parameter. - value: The value of the current parameter. - - Returns: - tuple: A tuple containing the processed data and its shape. - - Examples: - This method is intended to be called internally within the class and does not have direct usage examples. - """ - if addr_diff[index] > 1: - c_lines = contents[(addr + 1):(addr + addr_diff[index])] - data = " ".join([line.strip() for line in c_lines if not re.match(ptrn_comment, line)]) - return (data, value) if data else (Parser.convert_string_to(value), -1) - return Parser.convert_string_to(value), -1 - - def _set_param(self, params, param_addr, contents): - """ - Set the parameters and headers based on the given data. - - Args: - params: A list of parameter information. - param_addr: The addresses of the parameters. - contents: The contents of the data. - - Raises: - ValueError: If an invalid dtype is encountered. - - Examples: - This method is intended to be called internally within the class and does not have direct usage examples. 
- """ - addr_diff = np.diff(param_addr) - self._contents = contents - self._header = OrderedDict() - self._parameters = OrderedDict() - for index, addr in enumerate(param_addr[:-1]): - dtype, key, value = params[addr] - data, shape = self._process_contents(contents, addr, addr_diff, index, value) - if dtype is PARAMETER: - self._parameters[key] = Parser.convert_data_to(data, shape) - elif dtype is HEADER: - self._header[key] = data - else: - raise ValueError("Invalid dtype encountered in _set_param") - - def __getitem__(self, key): - return self.parameters[key] - - def __getattr__(self, key): - return self.parameters[key] - - def __repr__(self): - return f"{self.name}({', '.join(self._repr_items)})" - - def keys(self): - return self.parameters.keys() \ No newline at end of file diff --git a/brkraw/api/pvobj/pvdataset.py b/brkraw/api/pvobj/pvdataset.py new file mode 100755 index 0000000..c3fd46e --- /dev/null +++ b/brkraw/api/pvobj/pvdataset.py @@ -0,0 +1,190 @@ +import os +import re +import zipfile +from collections import OrderedDict +from .base import BaseMethods +from .pvscan import PvScan + + +class PvDataset(BaseMethods): + """ + A class representing a PvDataset object. + + Inherits from BaseMethods. + + Attributes: + is_compressed (bool): Indicates if the dataset is compressed. + + Methods: + get_scan(scan_id): Get a specific scan object by ID. + + Properties: + path (str): The path of the object. + avail (list): A list of available scans. + contents (dict): A dictionary of pvdataset contents. + """ + def __init__(self, path, debug=False): + """ + Initialize the object with the given path and optional debug flag. + + Args: + path: The path to initialize the object with. + debug: A flag indicating whether debug mode is enabled. + **kwargs: Additional keyword arguments. + + Raises: + Any exceptions raised by _check_dataset_validity or _construct methods. + + Notes: + If 'pvdataset' is present in kwargs, it will be used to initialize the object via super(). + + Examples: + obj = ClassName(path='/path/to/dataset', debug=True) + """ + + if not debug: + self._check_dataset_validity(path) + self._construct() + + # internal method + def _check_dataset_validity(self, path): + """ + Checks the validity of a given dataset path. + + Note: This method only checks the validity of the dataset to be fetched using `fetch_dir` and `fetch_zip`, + and does not check the validity of a `PvDataset`. + + Args: + path (str): The path to check. + + Raises: + FileNotFoundError: If the path does not exist. + ValueError: If the path is not a directory or a file, or if it does not meet the required criteria. + + Returns: + None + """ + self._path = os.path.abspath(path) + if not os.path.exists(self._path): + raise FileNotFoundError(f"The path '{self._path}' does not exist.") + if os.path.isdir(self._path): + self._contents = self._fetch_dir(self._path) + self.is_compressed = False + elif os.path.isfile(self._path) and zipfile.is_zipfile(self._path): + self._contents = self._fetch_zip(self._path) + self.is_compressed = True + else: + raise ValueError(f"The path '{self._path}' does not meet the required criteria.") + + def _construct(self): # sourcery skip: low-code-quality + """ + Constructs the object by organizing the contents. + + This method constructs the object by organizing the contents based on the provided directory structure. + It iterates over the sorted contents and updates the `_scans` and `_backup` dictionaries accordingly. 
+ After processing, it removes the processed paths from the `_contents` dictionary. + + Args: + **kwargs: keyword argument for datatype specification. + + Returns: + None + """ + self._scans = OrderedDict() + self._backup = OrderedDict() + + to_remove = [] + for path, contents in self._contents.items(): + if not path: + self._root = contents + to_remove.append(path) + elif not contents['files']: + to_remove.append(path) + elif matched := re.match(r'(?:.*/)?(\d+)/(\D+)/(\d+)$', path) or re.match(r'(?:.*/)?(\d+)$', path): + to_remove.append(self._process_childobj(matched, (path, contents))) + self._clear_contents(to_remove) + + def _process_childobj(self, matched, item): + """ + The `_process_childobj` method processes a child object based on the provided arguments and updates the internal state of the object. + + Args: + matched: A `re.Match` object representing the matched pattern. + item: A tuple containing the path and contents of the child object. + **kwargs: Additional keyword arguments. + + Returns: + str: The path of the processed child object. + + Raises: + None. + + Examples: + # Example usage of _process_childobj + matched = re.match(pattern, input_string) + item = ('path/to/child', {'dirs': set(), 'files': [], 'file_indexes': []}) + result = obj._process_childobj(matched, item, pvscan={'binary_files': [], 'parameter_files': ['method', 'acqp', 'visu_pars']}) + """ + path, contents = item + scan_id = int(matched.group(1)) + if scan_id not in self._scans: + self._scans[scan_id] = PvScan(scan_id, (self.path, path)) + if len(matched.groups()) == 1 and 'pdata' in contents['dirs']: + self._scans[scan_id].update(contents) + elif len(matched.groups()) == 3 and matched.group(2) == 'pdata': + reco_id = int(matched.group(3)) + self._scans[scan_id].set_reco(path, reco_id, contents) + else: + self._backup[path] = contents + return path + + @property + def contents(self): + for _, contents in super().contents.items(): + if 'subject' in contents['files']: + return contents + + def _clear_contents(self, to_be_removed): + for path in to_be_removed: + try: + del self._contents[path] + except KeyError: + self._dummy.append(path) + + @property + def path(self): + """ + Gets the path of the object. + + Returns: + str: The path of the object. + """ + return self._path + + @property + def avail(self): + """ + A property representing the available scans. + + Returns: + list: A list of available scans. + """ + return sorted(list(self._scans)) + + def get_scan(self, scan_id): + """ + Get a specific scan object by ID. + + Args: + scan_id (int): The ID of the scan object to retrieve. + + Returns: + object: The specified scan object. + + Raises: + KeyError: If the specified scan ID does not exist. + """ + return self._scans[scan_id] + + def __dir__(self): + return super().__dir__() + ['path', 'avail', 'get_scan'] diff --git a/brkraw/api/pvobj/pvreco.py b/brkraw/api/pvobj/pvreco.py new file mode 100644 index 0000000..4a7b535 --- /dev/null +++ b/brkraw/api/pvobj/pvreco.py @@ -0,0 +1,57 @@ +import os +from .base import BaseMethods + + +class PvReco(BaseMethods): + """ + A class representing a PvReco object. + + Inherits from BaseMethods. + + Attributes: + is_compressed (bool): Indicates if the dataset is compressed. + + Args: + scan_id (int): The ID of the scan. + reco_id (int): The ID of the reconstruction. + pathes (tuple): A tuple containing the root path and the path. + contents (list): A list of contents. + + Properties: + path (str): The path. 
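One quirk worth remembering when consuming these objects: the `path` property shared by `PvScan` and `PvReco` returns a `(root, internal)` tuple for compressed datasets but a joined string for folders. A small hypothetical helper that normalizes both shapes for display:

```python
import os

def describe_path(path):
    """Render PvScan/PvReco .path uniformly, e.g. for logging."""
    if isinstance(path, tuple):        # compressed: (zip file path, member prefix)
        root, internal = path
        return f'{os.path.basename(root)}!/{internal}'
    return path                        # plain directory: already os.path.join'ed
```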
+ """ + def __init__(self, scan_id, reco_id, pathes, contents): + """ + Initialize a Dataset object. + + Args: + scan_id (int): The ID of the scan. + reco_id (int): The ID of the reconstruction. + pathes (tuple): A tuple containing the root path and the path. + contents (list): A list of contents. + + Attributes: + _scan_id (int): The ID of the scan. + _reco_id (int): The ID of the reconstruction. + _rootpath (str): The root path. + _path (str): The path. + _contents (list): The list of contents. + """ + self._scan_id = scan_id + self._reco_id = reco_id + self._rootpath, self._path = pathes + self._contents = contents + self.is_compressed = True if contents.get('file_indexes') else False + + @property + def path(self): + """ + A property representing the path. + + Returns: + str: The path. + """ + path = (self._rootpath, self._path) + if self.is_compressed: + return path + return os.path.join(*path) diff --git a/brkraw/api/pvobj/pvscan.py b/brkraw/api/pvobj/pvscan.py new file mode 100644 index 0000000..f1182d0 --- /dev/null +++ b/brkraw/api/pvobj/pvscan.py @@ -0,0 +1,127 @@ +import os +from collections import OrderedDict +from .base import BaseMethods +from .pvreco import PvReco + +class PvScan(BaseMethods): + """ + A class representing a PvScan object. + + Inherits from BaseMethods. + + Attributes: + is_compressed (bool): Indicates if the dataset is compressed. + + Methods: + update(contents): Update the contents of the dataset. + set_reco(path, reco_id, contents): Set a reco object with the specified path, ID, and contents. + get_reco(reco_id): Get a specific reco object by ID. + + Properties: + path (str): The path. + avail (list): A list of available items. + contents (dict): A dictionary of pvscan contents. + """ + def __init__(self, scan_id, pathes, contents=None, recos=None): + """ + Initialize a Dataset object. + + Args: + scan_id (int): The ID of the scan. + pathes (tuple): A tuple containing the root path and the path. + contents (list, optional): The initial contents of the dataset. Defaults to None. + recos (dict, optional): A dictionary of reco objects. Defaults to None. + + Attributes: + _scan_id (int): The ID of the scan. + _rootpath (str): The root path. + _path (str): The path. + _recos (OrderedDict): An ordered dictionary of reco objects. + + Methods: + update(contents): Update the contents of the dataset. + """ + self._scan_id = scan_id + self._rootpath, self._path = pathes + self.update(contents) + self._recos = OrderedDict(recos) if recos else OrderedDict() + + def update(self, contents): + """ + Update the contents of the dataset. + + Args: + contents (list): The new contents of the dataset. + + Returns: + None + """ + if contents: + self.is_compressed = True if contents.get('file_indexes') else False + self._contents = contents + + def set_reco(self, path, reco_id, contents): + """ + Set a reco object with the specified path, ID, and contents. + + Args: + path (str): The path of the reco object. + reco_id (int): The ID of the reco object. + contents (list): The contents of the reco object. + + Returns: + None + """ + self._recos[reco_id] = PvReco(self._scan_id, reco_id, (self._rootpath, path), contents) + + def get_reco(self, reco_id): + """ + Get a specific reco object by ID. + + Args: + reco_id (int): The ID of the reco object to retrieve. + + Returns: + object: The specified reco object. + + Raises: + KeyError: If the specified reco ID does not exist. 
+ """ + return self._recos[reco_id] + + def get_visu_pars(self, reco_id=None): + if reco_id: + return getattr(self.get_reco(reco_id), 'visu_pars') + elif 'visu_pars' in self.contents['files']: + return getattr(self, 'visu_pars') + elif len(self.avail): + recoobj = self.get_reco(self.avail[0]) + if 'visu_pars' not in recoobj.contents['files']: + raise FileNotFoundError + else: + return getattr(recoobj, 'visu_pars') + else: + raise FileNotFoundError + + @property + def path(self): + """ + A property representing the path. + + Returns: + str: The path. + """ + path = (self._rootpath, self._path) + if self.is_compressed: + return path + return os.path.join(*path) + + @property + def avail(self): + """ + A property representing the available items. + + Returns: + list: A list of available items. + """ + return sorted(list(self._recos)) \ No newline at end of file From 9bab472403eabca7df86e8e24a9bac7bd2a0c404 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 18:50:50 -0400 Subject: [PATCH 25/38] [update] analyzer module converted to folder --- brkraw/api/analyzer/__init__.py | 6 ++ .../api/{analyzer.py => analyzer/affine.py} | 79 +------------------ brkraw/api/analyzer/base.py | 3 + brkraw/api/analyzer/dataarray.py | 35 ++++++++ brkraw/api/analyzer/scaninfo.py | 45 +++++++++++ 5 files changed, 93 insertions(+), 75 deletions(-) create mode 100644 brkraw/api/analyzer/__init__.py rename brkraw/api/{analyzer.py => analyzer/affine.py} (65%) create mode 100644 brkraw/api/analyzer/base.py create mode 100644 brkraw/api/analyzer/dataarray.py create mode 100644 brkraw/api/analyzer/scaninfo.py diff --git a/brkraw/api/analyzer/__init__.py b/brkraw/api/analyzer/__init__.py new file mode 100644 index 0000000..4fed832 --- /dev/null +++ b/brkraw/api/analyzer/__init__.py @@ -0,0 +1,6 @@ +from .base import BaseAnalyzer +from .scaninfo import ScanInfoAnalyzer +from .affine import AffineAnalyzer +from .dataarray import DataArrayAnalyzer + +__all__ = [BaseAnalyzer, ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer] \ No newline at end of file diff --git a/brkraw/api/analyzer.py b/brkraw/api/analyzer/affine.py similarity index 65% rename from brkraw/api/analyzer.py rename to brkraw/api/analyzer/affine.py index 85da5a3..25ebb73 100644 --- a/brkraw/api/analyzer.py +++ b/brkraw/api/analyzer/affine.py @@ -1,13 +1,12 @@ from __future__ import annotations from brkraw.api import helper +from .base import BaseAnalyzer import numpy as np from copy import copy from typing import TYPE_CHECKING if TYPE_CHECKING: - from .pvobj import PvScan - from .loader import ScanInfo - from io import BufferedReader - from zipfile import ZipExtFile + from ..loader import ScanInfo + SLICEORIENT = { 0: 'sagital', @@ -22,51 +21,7 @@ } -class BaseAnalyzer: - def vars(self): - return self.__dict__ - - -class ScanInfoAnalyzer(BaseAnalyzer): - """Helps parse metadata from multiple parameter files to make it more human-readable. - - Args: - pvscan (PvScan): The PvScan object containing acquisition and method parameters. - reco_id (int, optional): The reconstruction ID. Defaults to None. - - Raises: - NotImplementedError: If an operation is not implemented. 
- """ - def __init__(self, pvscan: 'PvScan', reco_id:int|None = None): - self._set_pars(pvscan, reco_id) - self.info_protocol = helper.Protocol(self).get_info() - if self.visu_pars: - self.info_dataarray = helper.DataArray(self).get_info() - self.info_frame_group = helper.FrameGroup(self).get_info() - self.info_image = helper.Image(self).get_info() - self.info_slicepack = helper.SlicePack(self).get_info() - self.info_cycle = helper.Cycle(self).get_info() - if self.info_image['dim'] > 1: - self.info_orientation = helper.Orientation(self).get_info() - - def _set_pars(self, pvscan: 'PvScan', reco_id: int|None): - for p in ['acqp', 'method']: - vals = getattr(pvscan, p) - setattr(self, p, vals) - try: - visu_pars = pvscan.get_visu_pars(reco_id) - except FileNotFoundError: - visu_pars = None - setattr(self, 'visu_pars', visu_pars) - - def __dir__(self): - return [attr for attr in self.__dict__.keys() if 'info_' in attr] - - def get(self, key): - return getattr(self, key) if key in self.__dir__() else None - - -class AffineAnalyzer: +class AffineAnalyzer(BaseAnalyzer): def __init__(self, infoobj: 'ScanInfo'): infoobj = copy(infoobj) if infoobj.image['dim'] == 2: @@ -173,29 +128,3 @@ def _inspect_subj_info(subj_pose, subj_type): assert side in SUBJPOSE['side'], 'Invalid subject position' if subj_type: assert subj_type in SUBJTYPE, 'Invalid subject type' - - -class DataArrayAnalyzer(BaseAnalyzer): - def __init__(self, infoobj: 'ScanInfo', fileobj: BufferedReader|ZipExtFile): - infoobj = copy(infoobj) - self._parse_info(infoobj) - self.buffer = fileobj - - def _parse_info(self, infoobj: 'ScanInfo'): - if not hasattr(infoobj, 'dataarray'): - raise AttributeError - self.slope = infoobj.dataarray['2dseq_slope'] - self.offset = infoobj.dataarray['2dseq_offset'] - self.dtype = infoobj.dataarray['2dseq_dtype'] - self.shape = infoobj.image['shape'][:] - self.shape_desc = infoobj.image['dim_desc'][:] - if infoobj.frame_group and infoobj.frame_group['type']: - self._calc_array_shape(infoobj) - - def _calc_array_shape(self, infoobj: 'ScanInfo'): - self.shape.extend(infoobj.frame_group['shape'][:]) - self.shape_desc.extend([fgid.replace('FG_', '').lower() for fgid in infoobj.frame_group['id']]) - - def get_dataarray(self): - self.buffer.seek(0) - return np.frombuffer(self.buffer.read(), self.dtype).reshape(self.shape, order='F') diff --git a/brkraw/api/analyzer/base.py b/brkraw/api/analyzer/base.py new file mode 100644 index 0000000..0198b9a --- /dev/null +++ b/brkraw/api/analyzer/base.py @@ -0,0 +1,3 @@ +class BaseAnalyzer: + def vars(self): + return self.__dict__ \ No newline at end of file diff --git a/brkraw/api/analyzer/dataarray.py b/brkraw/api/analyzer/dataarray.py new file mode 100644 index 0000000..64fdeda --- /dev/null +++ b/brkraw/api/analyzer/dataarray.py @@ -0,0 +1,35 @@ +from __future__ import annotations +from .base import BaseAnalyzer +import numpy as np +from copy import copy +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from ..loader import ScanInfo + from io import BufferedReader + from zipfile import ZipExtFile + + +class DataArrayAnalyzer(BaseAnalyzer): + def __init__(self, infoobj: 'ScanInfo', fileobj: BufferedReader|ZipExtFile): + infoobj = copy(infoobj) + self._parse_info(infoobj) + self.buffer = fileobj + + def _parse_info(self, infoobj: 'ScanInfo'): + if not hasattr(infoobj, 'dataarray'): + raise AttributeError + self.slope = infoobj.dataarray['2dseq_slope'] + self.offset = infoobj.dataarray['2dseq_offset'] + self.dtype = infoobj.dataarray['2dseq_dtype'] + self.shape = 
infoobj.image['shape'][:] + self.shape_desc = infoobj.image['dim_desc'][:] + if infoobj.frame_group and infoobj.frame_group['type']: + self._calc_array_shape(infoobj) + + def _calc_array_shape(self, infoobj: 'ScanInfo'): + self.shape.extend(infoobj.frame_group['shape'][:]) + self.shape_desc.extend([fgid.replace('FG_', '').lower() for fgid in infoobj.frame_group['id']]) + + def get_dataarray(self): + self.buffer.seek(0) + return np.frombuffer(self.buffer.read(), self.dtype).reshape(self.shape, order='F') diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py new file mode 100644 index 0000000..7af2189 --- /dev/null +++ b/brkraw/api/analyzer/scaninfo.py @@ -0,0 +1,45 @@ +from __future__ import annotations +from brkraw.api import helper +from .base import BaseAnalyzer +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from ..pvobj import PvScan + + +class ScanInfoAnalyzer(BaseAnalyzer): + """Helps parse metadata from multiple parameter files to make it more human-readable. + + Args: + pvscan (PvScan): The PvScan object containing acquisition and method parameters. + reco_id (int, optional): The reconstruction ID. Defaults to None. + + Raises: + NotImplementedError: If an operation is not implemented. + """ + def __init__(self, pvscan: 'PvScan', reco_id:int|None = None): + self._set_pars(pvscan, reco_id) + self.info_protocol = helper.Protocol(self).get_info() + if self.visu_pars: + self.info_dataarray = helper.DataArray(self).get_info() + self.info_frame_group = helper.FrameGroup(self).get_info() + self.info_image = helper.Image(self).get_info() + self.info_slicepack = helper.SlicePack(self).get_info() + self.info_cycle = helper.Cycle(self).get_info() + if self.info_image['dim'] > 1: + self.info_orientation = helper.Orientation(self).get_info() + + def _set_pars(self, pvscan: 'PvScan', reco_id: int|None): + for p in ['acqp', 'method']: + vals = getattr(pvscan, p) + setattr(self, p, vals) + try: + visu_pars = pvscan.get_visu_pars(reco_id) + except FileNotFoundError: + visu_pars = None + setattr(self, 'visu_pars', visu_pars) + + def __dir__(self): + return [attr for attr in self.__dict__.keys() if 'info_' in attr] + + def get(self, key): + return getattr(self, key) if key in self.__dir__() else None \ No newline at end of file From 6f5ade9db0e4c1b7335e46e66d4da76a37ec0e82 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 19:01:57 -0400 Subject: [PATCH 26/38] [update] loader.py module into brkobj module --- brkraw/api/__init__.py | 4 +-- brkraw/api/analyzer/dataarray.py | 2 +- brkraw/api/brkobj/__init__.py | 4 +++ brkraw/api/{loader.py => brkobj/scan.py} | 44 ++---------------------- brkraw/api/brkobj/study.py | 44 ++++++++++++++++++++++++ brkraw/app/tonifti/main.py | 4 +-- 6 files changed, 55 insertions(+), 47 deletions(-) create mode 100644 brkraw/api/brkobj/__init__.py rename brkraw/api/{loader.py => brkobj/scan.py} (63%) create mode 100644 brkraw/api/brkobj/study.py diff --git a/brkraw/api/__init__.py b/brkraw/api/__init__.py index 67ede03..5f9ce81 100755 --- a/brkraw/api/__init__.py +++ b/brkraw/api/__init__.py @@ -1,4 +1,4 @@ -from .loader import BrkrawLoader +from .brkobj import StudyObj from ..config import ConfigManager -__all__ = [BrkrawLoader, ConfigManager] \ No newline at end of file +__all__ = [StudyObj, ConfigManager] \ No newline at end of file diff --git a/brkraw/api/analyzer/dataarray.py b/brkraw/api/analyzer/dataarray.py index 64fdeda..969aa04 100644 --- a/brkraw/api/analyzer/dataarray.py +++ b/brkraw/api/analyzer/dataarray.py @@ -4,7 
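`get_dataarray` reshapes the raw 2dseq buffer with `order='F'` because the file stores the first (read) dimension fastest; a default C-order reshape of the same bytes would scramble the geometry. A two-by-three demonstration:

```python
import numpy as np

buf = np.array([1, 2, 3, 4, 5, 6], dtype='<i2').tobytes()

f_order = np.frombuffer(buf, '<i2').reshape((2, 3), order='F')
c_order = np.frombuffer(buf, '<i2').reshape((2, 3), order='C')
print(f_order)   # [[1 3 5] [2 4 6]] -- column-major, matching the 2dseq layout
print(c_order)   # [[1 2 3] [4 5 6]] -- same bytes, wrong geometry for 2dseq
```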
+4,7 @@ from copy import copy from typing import TYPE_CHECKING if TYPE_CHECKING: - from ..loader import ScanInfo + from ..brkobj import ScanInfo from io import BufferedReader from zipfile import ZipExtFile diff --git a/brkraw/api/brkobj/__init__.py b/brkraw/api/brkobj/__init__.py new file mode 100644 index 0000000..093bafa --- /dev/null +++ b/brkraw/api/brkobj/__init__.py @@ -0,0 +1,4 @@ +from .study import StudyObj +from .scan import ScanObj + +__all__ = [StudyObj, ScanObj] \ No newline at end of file diff --git a/brkraw/api/loader.py b/brkraw/api/brkobj/scan.py similarity index 63% rename from brkraw/api/loader.py rename to brkraw/api/brkobj/scan.py index c150481..2554dc4 100644 --- a/brkraw/api/loader.py +++ b/brkraw/api/brkobj/scan.py @@ -1,47 +1,7 @@ from __future__ import annotations import ctypes -from typing import Dict -from .analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer -from .pvobj import PvDataset, PvScan - -class BrkrawLoader(PvDataset): - def __init__(self, path): - super().__init__(path) - self._parse_header() - - def get_scan(self, scan_id, reco_id=None, analyze=True): - """ - Get a scan object by scan ID. - """ - pvscan = super().get_scan(scan_id) - return ScanObj(pvscan=pvscan, reco_id=reco_id, - loader_address=id(self), analyze=analyze) - - def _parse_header(self) -> (Dict | None): - if not self.contents or 'subject' not in self.contents['files']: - self.header = None - return - subj = self.subject - subj_header = getattr(subj, 'header') if subj.is_parameter() else None - if title := subj_header['TITLE'] if subj_header else None: - self.header = {k.replace("SUBJECT_",""):v for k, v in subj.parameters.items() if k.startswith("SUBJECT")} - self.header['sw_version'] = title.split(',')[-1].strip() if 'ParaVision' in title else "ParaVision < 6" - @property - def avail(self): - return super().avail - - def info(self): - """output all analyzed information""" - info = {'header': None, - 'scans': {}} - if header := self.header: - info['header'] = header - for scan_id in self.avail: - info['scans'][scan_id] = {} - scanobj = self.get_scan(scan_id) - for reco_id in scanobj.avail: - info['scans'][scan_id][reco_id] = scanobj.get_info(reco_id).vars() - return info +from ..pvobj import PvScan +from ..analyzer import ScanInfoAnalyzer, AffineAnalyzer, DataArrayAnalyzer, BaseAnalyzer class ScanInfo(BaseAnalyzer): diff --git a/brkraw/api/brkobj/study.py b/brkraw/api/brkobj/study.py new file mode 100644 index 0000000..523537d --- /dev/null +++ b/brkraw/api/brkobj/study.py @@ -0,0 +1,44 @@ +from __future__ import annotations +from typing import Dict +from ..pvobj import PvDataset +from .scan import ScanObj + +class StudyObj(PvDataset): + def __init__(self, path): + super().__init__(path) + self._parse_header() + + def get_scan(self, scan_id, reco_id=None, analyze=True): + """ + Get a scan object by scan ID. 
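The rename is mechanical: `BrkrawLoader` becomes `StudyObj` under the new `brkobj` package, with per-scan logic split into `scan.py`. Downstream usage after this patch, with a placeholder path:

```python
from brkraw.api import StudyObj

study = StudyObj('/path/to/study')        # still a PvDataset underneath
summary = study.info()                    # {'header': {...}, 'scans': {id: {reco_id: {...}}}}

scanobj = study.get_scan(1, reco_id=1)    # ScanObj wrapping the PvScan
```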
+ """ + pvscan = super().get_scan(scan_id) + return ScanObj(pvscan=pvscan, reco_id=reco_id, + loader_address=id(self), analyze=analyze) + + def _parse_header(self) -> (Dict | None): + if not self.contents or 'subject' not in self.contents['files']: + self.header = None + return + subj = self.subject + subj_header = getattr(subj, 'header') if subj.is_parameter() else None + if title := subj_header['TITLE'] if subj_header else None: + self.header = {k.replace("SUBJECT_",""):v for k, v in subj.parameters.items() if k.startswith("SUBJECT")} + self.header['sw_version'] = title.split(',')[-1].strip() if 'ParaVision' in title else "ParaVision < 6" + + @property + def avail(self): + return super().avail + + def info(self): + """output all analyzed information""" + info = {'header': None, + 'scans': {}} + if header := self.header: + info['header'] = header + for scan_id in self.avail: + info['scans'][scan_id] = {} + scanobj = self.get_scan(scan_id) + for reco_id in scanobj.avail: + info['scans'][scan_id][reco_id] = scanobj.get_info(reco_id).vars() + return info diff --git a/brkraw/app/tonifti/main.py b/brkraw/app/tonifti/main.py index c6c6f26..c0fbb0d 100644 --- a/brkraw/app/tonifti/main.py +++ b/brkraw/app/tonifti/main.py @@ -1,7 +1,7 @@ import warnings import numpy as np from enum import Enum -from brkraw.api.loader import BrkrawLoader +from brkraw.api.brkobj import StudyObj XYZT_UNITS = \ @@ -14,7 +14,7 @@ class ScaleMode(Enum): HEADER = 2 -class BrkrawToNifti1(BrkrawLoader): +class BrkrawToNifti1(StudyObj): def __init__(self, path): super().__init__(path) self._cache = {} From f1abf24a8c6bafaf2e5f85aa1e7753d7f037731e Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 19:04:51 -0400 Subject: [PATCH 27/38] [patch] update module import path for brkobj --- brkraw/api/analyzer/affine.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/brkraw/api/analyzer/affine.py b/brkraw/api/analyzer/affine.py index 25ebb73..2a40078 100644 --- a/brkraw/api/analyzer/affine.py +++ b/brkraw/api/analyzer/affine.py @@ -5,7 +5,7 @@ from copy import copy from typing import TYPE_CHECKING if TYPE_CHECKING: - from ..loader import ScanInfo + from ..brkobj import ScanInfo SLICEORIENT = { From 81951890958998fe8fc3562e559f0492d1b5f4bb Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sat, 13 Apr 2024 22:28:15 -0400 Subject: [PATCH 28/38] [new feature] RecoToNifti, convert nii by input 2dseq and visu_pars --- brkraw/api/analyzer/affine.py | 4 +- brkraw/api/analyzer/scaninfo.py | 35 +++++++---- brkraw/api/helper/dataarray.py | 14 +++-- brkraw/api/helper/orientation.py | 6 +- brkraw/app/tonifti/reco.py | 102 +++++++++++++++++++++++++++++++ 5 files changed, 143 insertions(+), 18 deletions(-) create mode 100644 brkraw/app/tonifti/reco.py diff --git a/brkraw/api/analyzer/affine.py b/brkraw/api/analyzer/affine.py index 2a40078..be86f24 100644 --- a/brkraw/api/analyzer/affine.py +++ b/brkraw/api/analyzer/affine.py @@ -5,7 +5,7 @@ from copy import copy from typing import TYPE_CHECKING if TYPE_CHECKING: - from ..brkobj import ScanInfo + from ..brkobj.scan import ScanInfo SLICEORIENT = { @@ -28,7 +28,7 @@ def __init__(self, infoobj: 'ScanInfo'): xr, yr = infoobj.image['resolution'] self.resolution = [(xr, yr, zr) for zr in infoobj.slicepack['slice_distances_each_pack']] elif infoobj.image['dim'] == 3: - self.resolution = infoobj.image['resolution'][:] + self.resolution = [infoobj.image['resolution'][:]] else: raise NotImplementedError if infoobj.slicepack['num_slice_packs'] > 1: diff --git 
a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py index 7af2189..0b6d5f2 100644 --- a/brkraw/api/analyzer/scaninfo.py +++ b/brkraw/api/analyzer/scaninfo.py @@ -4,6 +4,7 @@ from typing import TYPE_CHECKING if TYPE_CHECKING: from ..pvobj import PvScan + from brkraw.app.tonifti.reco import RecoToNifti class ScanInfoAnalyzer(BaseAnalyzer): @@ -16,17 +17,29 @@ class ScanInfoAnalyzer(BaseAnalyzer): Raises: NotImplementedError: If an operation is not implemented. """ - def __init__(self, pvscan: 'PvScan', reco_id:int|None = None): - self._set_pars(pvscan, reco_id) - self.info_protocol = helper.Protocol(self).get_info() - if self.visu_pars: - self.info_dataarray = helper.DataArray(self).get_info() - self.info_frame_group = helper.FrameGroup(self).get_info() - self.info_image = helper.Image(self).get_info() - self.info_slicepack = helper.SlicePack(self).get_info() - self.info_cycle = helper.Cycle(self).get_info() - if self.info_image['dim'] > 1: - self.info_orientation = helper.Orientation(self).get_info() + def __init__(self, pvscan: 'PvScan'|'RecoToNifti', reco_id:int|None = None): + if hasattr(pvscan, 'is_recotonifti') and pvscan.is_recotonifti: + self._set_reco(pvscan) + else: + self._set_pars(pvscan, reco_id) + self.info_protocol = helper.Protocol(self).get_info() + if self.visu_pars: + self._parse_info() + + def _parse_info(self): + self.info_dataarray = helper.DataArray(self).get_info() + self.info_frame_group = helper.FrameGroup(self).get_info() + self.info_image = helper.Image(self).get_info() + self.info_slicepack = helper.SlicePack(self).get_info() + self.info_cycle = helper.Cycle(self).get_info() + if self.info_image['dim'] > 1: + self.info_orientation = helper.Orientation(self).get_info() + + def _set_reco(self, pvscan: 'RecoToNifti'): + setattr(self, 'visu_pars', pvscan.visu_pars) + setattr(self, 'method', pvscan.method) + setattr(self, 'acqp', None) + self._parse_info() def _set_pars(self, pvscan: 'PvScan', reco_id: int|None): for p in ['acqp', 'method']: diff --git a/brkraw/api/helper/dataarray.py b/brkraw/api/helper/dataarray.py index 1ab0310..04edb0a 100644 --- a/brkraw/api/helper/dataarray.py +++ b/brkraw/api/helper/dataarray.py @@ -32,19 +32,25 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): acqp = analobj.acqp visu_pars = analobj.visu_pars - fid_word_type = f'_{"".join(acqp["ACQ_word_size"].split("_"))}_SGN_INT' - fid_byte_order = f'{acqp["BYTORDA"]}Endian' - self.fid_dtype = np.dtype(f'{BYTEORDER[fid_byte_order]}{WORDTYPE[fid_word_type]}') - + if acqp: + fid_word_type = f'_{"".join(acqp["ACQ_word_size"].split("_"))}_SGN_INT' + fid_byte_order = f'{acqp["BYTORDA"]}Endian' + self.fid_dtype = np.dtype(f'{BYTEORDER[fid_byte_order]}{WORDTYPE[fid_word_type]}') + else: + self.fid_dtype = None + self._warn("Failed to fetch 'fid_dtype' information because the 'acqp' file is missing from 'analobj'.") + byte_order = visu_pars["VisuCoreByteOrder"] word_type = visu_pars["VisuCoreWordType"] self.data_dtype = np.dtype(f'{BYTEORDER[byte_order]}{WORDTYPE[word_type]}') + data_slope = visu_pars["VisuCoreDataSlope"] data_offset = visu_pars["VisuCoreDataOffs"] self.data_slope = data_slope[0] \ if isinstance(data_slope, list) and is_all_element_same(data_slope) else data_slope self.data_offset = data_offset[0] \ if isinstance(data_offset, list) and is_all_element_same(data_offset) else data_offset + if isinstance(self.data_slope, list) or isinstance(self.data_offset, list): self._warn("Data slope and data offset values are unusual. 
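helper/dataarray.py builds NumPy dtypes by concatenating a byte-order prefix with a word-type code, and after this patch it skips the fid dtype gracefully when `acqp` is absent. The `BYTEORDER` and `WORDTYPE` tables are defined elsewhere in the helper, so the mapping below is an assumed sketch of their shape, shown only to make the construction concrete:

```python
import numpy as np

# Assumed contents of the helper's lookup tables (not shown in this patch):
BYTEORDER = {'littleEndian': '<', 'bigEndian': '>'}
WORDTYPE = {'_16BIT_SGN_INT': 'i2', '_32BIT_SGN_INT': 'i4', '_32BIT_FLOAT': 'f4'}

byte_order = 'littleEndian'            # e.g. visu_pars['VisuCoreByteOrder']
word_type = '_16BIT_SGN_INT'           # e.g. visu_pars['VisuCoreWordType']
print(np.dtype(f'{BYTEORDER[byte_order]}{WORDTYPE[word_type]}'))   # int16
```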
" "They are expected to be either a list containing the same elements or a single float value.") diff --git a/brkraw/api/helper/orientation.py b/brkraw/api/helper/orientation.py index 454a03f..7a86195 100644 --- a/brkraw/api/helper/orientation.py +++ b/brkraw/api/helper/orientation.py @@ -69,9 +69,10 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): self.subject_position = visu_pars.get("VisuSubjectPosition") self._orient = visu_pars["VisuCoreOrientation"].tolist() self._position = visu_pars["VisuCorePosition"] - self.gradient_orient = analobj.method["PVM_SPackArrGradOrient"] + self._set_gradient_orient(analobj) self.num_slice_packs = info_slicepack['num_slice_packs'] self.gradient_encoding_dir = self._get_gradient_encoding_dir(visu_pars) + self.orientation = [] self.orientation_desc = [] self.volume_origin = [] @@ -81,6 +82,9 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): else: self._case_single_slicepack() + def _set_gradient_orient(self, analobj): + self.gradient_orient = analobj.method["PVM_SPackArrGradOrient"] if analobj.method else None + def get_info(self): return { 'subject_type': self.subject_type, diff --git a/brkraw/app/tonifti/reco.py b/brkraw/app/tonifti/reco.py new file mode 100644 index 0000000..e79d692 --- /dev/null +++ b/brkraw/app/tonifti/reco.py @@ -0,0 +1,102 @@ +import warnings +import numpy as np +from pathlib import Path +from brkraw.api.pvobj import Parameter +from brkraw.api.brkobj.scan import ScanInfo +from brkraw.api.analyzer import ScanInfoAnalyzer, DataArrayAnalyzer, AffineAnalyzer + + +class RecoToNifti: + def __init__(self, data_path:Path, visu_pars:Path, method:Path=None): + """_summary_ + + Args: + data_path (str): path of '2dseq' file in reco_dir + pars_path (str): path of 'visu_pars' file in reco_dir + """ + self._load_arr(data_path) + self._load_pars(visu_pars, method) + self._set_info() + + def is_recotonifti(self): + return True + + def _set_info(self): + analysed = ScanInfoAnalyzer(self) + infoobj = ScanInfo() + + for attr_name in dir(analysed): + if 'info_' in attr_name: + attr_vals = getattr(analysed, attr_name) + setattr(infoobj, attr_name.replace('info_', ''), attr_vals) + if attr_vals and attr_vals['warns']: + infoobj.warns.extend(attr_vals['warns']) + self.info = infoobj + self.analysed = analysed + + def _load_arr(self, data_path): + self.fileobj = open(data_path, 'rb') + + def _load_pars(self, visu_pars, method): + visu_str = self._open_as_string(visu_pars) + visu_obj = Parameter(visu_str, name='visu_pars') + if not len([k for k in visu_obj.keys() if 'visu' in k.lower()]): + raise TypeError("The loaded file is incompatible with the expected 'visu_pars' file. " + "Please verify that the file path correctly points to a 'visu_pars' file.") + self.visu_pars = visu_obj + if method: + method_str = self._open_as_string(method) + method_obj = Parameter(method_str, name='method') + if not len([k for k in method_obj.keys() if 'pvm_' in k.lower()]): + raise TypeError("The loaded file is incompatible with the expected 'method' file. " + "Please verify that the file path correctly points to a 'method' file.") + self.method = method_obj + else: + self.method = None + warnings.warn("The 'RecoToNifti' object did not receive an input argument for the 'method' file. " + "As a result, the affine matrix may be inaccurate. 
" + "Please consider providing the 'method' file as input if possible.") + + @staticmethod + def _open_as_fileobj(path: Path): + return open(path, 'rb') + + @classmethod + def _open_as_string(cls, path: Path): + return cls._open_as_fileobj(path).read().decode('UTF-8').split('\n') + + def get_data_dict(self): + data_info = DataArrayAnalyzer(self.analysed, self.fileobj) + axis_labels = data_info.shape_desc + dataarray = data_info.get_dataarray() + slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2 + if slice_axis != 2: + dataarray = np.swapaxes(dataarray, slice_axis, 2) + axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis] + return { + 'data_array': dataarray, + 'data_slope': data_info.slope, + 'data_offset': data_info.offset, + 'axis_labels': axis_labels + } + + def get_affine_dict(self, subj_type:str|None=None, subj_position:str|None=None): + affine_info = AffineAnalyzer(self.info) + subj_type = subj_type or affine_info.subj_type + subj_position = subj_position or affine_info.subj_position + affine = affine_info.get_affine(subj_type, subj_position) + return { + "num_slicepacks": len(affine) if isinstance(affine, list) else 1, + "affine": affine, + "subj_type": subj_type, + "subj_position": subj_position + } + + def get_dataobj(self, scale_correction=True): + data_dict = self.get_data_dict() + if scale_correction: + dataobj = data_dict['data_array'] * data_dict['data_slope'] + data_dict['data_offset'] + return dataobj + + def get_affine(self, subj_type:str|None=None, subj_position:str|None=None): + return self.get_affine_dict(subj_type, subj_position)['affine'] From 2312f2e43b227e62e6fcf3352e6ab199417c0e3e Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 09:02:22 -0400 Subject: [PATCH 29/38] [new feature] PvFiles object - class to import files instead of folder --- brkraw/api/analyzer/scaninfo.py | 49 ++++++++++++++------------------- brkraw/api/helper/protocol.py | 21 ++++++++------ brkraw/api/pvobj/__init__.py | 3 +- brkraw/api/pvobj/base.py | 8 +++--- brkraw/api/pvobj/pvfiles.py | 47 +++++++++++++++++++++++++++++++ brkraw/api/pvobj/pvscan.py | 14 ++++------ 6 files changed, 92 insertions(+), 50 deletions(-) create mode 100644 brkraw/api/pvobj/pvfiles.py diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py index 0b6d5f2..515fdfc 100644 --- a/brkraw/api/analyzer/scaninfo.py +++ b/brkraw/api/analyzer/scaninfo.py @@ -3,29 +3,38 @@ from .base import BaseAnalyzer from typing import TYPE_CHECKING if TYPE_CHECKING: - from ..pvobj import PvScan - from brkraw.app.tonifti.reco import RecoToNifti + from ..pvobj import PvScan, PvReco, PvFiles class ScanInfoAnalyzer(BaseAnalyzer): """Helps parse metadata from multiple parameter files to make it more human-readable. Args: - pvscan (PvScan): The PvScan object containing acquisition and method parameters. + pvobj (PvScan): The PvScan object containing acquisition and method parameters. reco_id (int, optional): The reconstruction ID. Defaults to None. Raises: NotImplementedError: If an operation is not implemented. 
""" - def __init__(self, pvscan: 'PvScan'|'RecoToNifti', reco_id:int|None = None): - if hasattr(pvscan, 'is_recotonifti') and pvscan.is_recotonifti: - self._set_reco(pvscan) - else: - self._set_pars(pvscan, reco_id) - self.info_protocol = helper.Protocol(self).get_info() - if self.visu_pars: - self._parse_info() - + def __init__(self, pvobj: 'PvScan'|'PvReco'|'PvFiles', reco_id:int|None = None): + self._set_pars(pvobj, reco_id) + self.info_protocol = helper.Protocol(self).get_info() + if self.visu_pars: + self._parse_info() + + def _set_pars(self, pvobj: 'PvScan'|'PvReco'|'PvFiles', reco_id: int|None): + for p in ['acqp', 'method']: + try: + vals = getattr(pvobj, p) + except AttributeError: + vals = None + setattr(self, p, vals) + try: + visu_pars = pvobj.get_visu_pars(reco_id) + except FileNotFoundError: + visu_pars = None + setattr(self, 'visu_pars', visu_pars) + def _parse_info(self): self.info_dataarray = helper.DataArray(self).get_info() self.info_frame_group = helper.FrameGroup(self).get_info() @@ -35,22 +44,6 @@ def _parse_info(self): if self.info_image['dim'] > 1: self.info_orientation = helper.Orientation(self).get_info() - def _set_reco(self, pvscan: 'RecoToNifti'): - setattr(self, 'visu_pars', pvscan.visu_pars) - setattr(self, 'method', pvscan.method) - setattr(self, 'acqp', None) - self._parse_info() - - def _set_pars(self, pvscan: 'PvScan', reco_id: int|None): - for p in ['acqp', 'method']: - vals = getattr(pvscan, p) - setattr(self, p, vals) - try: - visu_pars = pvscan.get_visu_pars(reco_id) - except FileNotFoundError: - visu_pars = None - setattr(self, 'visu_pars', visu_pars) - def __dir__(self): return [attr for attr in self.__dict__.keys() if 'info_' in attr] diff --git a/brkraw/api/helper/protocol.py b/brkraw/api/helper/protocol.py index fb1e29d..e8bd64a 100644 --- a/brkraw/api/helper/protocol.py +++ b/brkraw/api/helper/protocol.py @@ -13,16 +13,19 @@ class Protocol(BaseHelper): """ def __init__(self, analobj: 'ScanInfoAnalyzer'): super().__init__() + acqp = analobj.acqp - self.sw_version = str(acqp.get('ACQ_sw_version')) - self.operator = acqp.get('ACQ_operator') - self.pulse_program = acqp.get('PULPROG') - self.nucleus = acqp.get('NUCLEUS') - self.protocol_name = acqp.get('ACQ_protocol_name') or acqp.get('ACQ_scan_name') - self.scan_method = acqp.get('ACQ_method') - self.subject_pos = acqp.get('ACQ_patient_pos') - self.institution = acqp.get('ACQ_institution') - self.device = acqp.get('ACQ_station') + if not acqp: + self._warn("Failed to fetch all Protocol information because the 'acqp' file is missing from 'analobj'.") + self.sw_version = str(acqp.get('ACQ_sw_version')) if acqp else None + self.operator = acqp.get('ACQ_operator') if acqp else None + self.pulse_program = acqp.get('PULPROG') if acqp else None + self.nucleus = acqp.get('NUCLEUS') if acqp else None + self.protocol_name = acqp.get('ACQ_protocol_name') or acqp.get('ACQ_scan_name') if acqp else None + self.scan_method = acqp.get('ACQ_method') if acqp else None + self.subject_pos = acqp.get('ACQ_patient_pos') if acqp else None + self.institution = acqp.get('ACQ_institution') if acqp else None + self.device = acqp.get('ACQ_station') if acqp else None def get_info(self): return { diff --git a/brkraw/api/pvobj/__init__.py b/brkraw/api/pvobj/__init__.py index c3bba62..ed59d4f 100755 --- a/brkraw/api/pvobj/__init__.py +++ b/brkraw/api/pvobj/__init__.py @@ -1,6 +1,7 @@ from .pvdataset import PvDataset from .pvscan import PvScan from .pvreco import PvReco +from .pvfiles import PvFiles from .parameters import 
Parameter -__all__ = [PvDataset, PvScan, PvReco, Parameter] \ No newline at end of file +__all__ = [PvDataset, PvScan, PvReco, PvFiles, Parameter] \ No newline at end of file diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index 4ecef93..ec2f330 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -83,11 +83,11 @@ def _open_as_fileobject(self, key): file object: The opened file object. Raises: - ValueError: If the key does not exist in the files. + KeyError: If the key does not exist in the files. """ rootpath = self._rootpath or self._path if not self.contents: - raise ValueError(f'file not exists in "{rel_path}".') + raise KeyError(f'Failed to load contents list from "{rootpath}".') files = self.contents.get('files') path_list = [*([str(self._scan_id)] if self._scan_id else []), *(['pdata', str(self._reco_id)] if self._reco_id else []), key] @@ -96,7 +96,7 @@ def _open_as_fileobject(self, key): rel_path = self._path else: rel_path = os.path.join(*path_list) - raise ValueError(f'file not exists in "{rel_path}".\n [{", ".join(files)}]') + raise KeyError(f'Failed to load filename "{key}" from folder "{rel_path}".\n [{", ".join(files)}]') if file_indexes := self.contents.get('file_indexes'): with zipfile.ZipFile(rootpath) as zf: @@ -148,7 +148,7 @@ def __getattr__(self, key): obj = Dataset() param = obj.some_key # Returns a Parameter object or file object. """ - key = key[1:] if key.startswith('_') else key #new code + key = key[1:] if key.startswith('_') else key if file := [f for f in self.contents['files'] if (f == key or f.replace('.', '_') == key)]: fileobj = self._open_as_fileobject(file.pop()) diff --git a/brkraw/api/pvobj/pvfiles.py b/brkraw/api/pvobj/pvfiles.py new file mode 100644 index 0000000..939699e --- /dev/null +++ b/brkraw/api/pvobj/pvfiles.py @@ -0,0 +1,47 @@ +import os +from .base import BaseMethods + +class PvFiles(BaseMethods): + def __init__(self, *files): + """_summary_ + + Args: + data_path (str): path of '2dseq' file in reco_dir + pars_path (str): path of 'visu_pars' file in reco_dir + """ + self.update(*files) + + def update(self, *files): + self._path = [os.path.abspath(f) for f in files if os.path.exists(f)] + self._contents = {"files": [os.path.basename(f) for f in self._path], + "dirs": [], + "file_indexes": []} + + def _open_as_fileobject(self, key): + """Override open_as_fileobject method + + Args: + key: The key to identify the file. + + Returns: + file object: The opened file object. + + Raises: + ValueError: If the key does not exist in the files. + """ + if file_path := self._search_file_path(key): + return open(file_path, 'rb') + raise KeyError(f'Failed to find filename "{key}" from input files.\n [{self.contents.get("files")}]') + + def _search_file_path(self, key): + if files := [f for f in self._path if key in f]: + return files.pop() + else: + return False + + def get_visu_pars(self, _:None=None): + """ Mock function of PvScan """ + return getattr(self, 'visu_pars') + + def path(self): + return self._path diff --git a/brkraw/api/pvobj/pvscan.py b/brkraw/api/pvobj/pvscan.py index f1182d0..97dae72 100644 --- a/brkraw/api/pvobj/pvscan.py +++ b/brkraw/api/pvobj/pvscan.py @@ -22,7 +22,7 @@ class PvScan(BaseMethods): avail (list): A list of available items. contents (dict): A dictionary of pvscan contents. """ - def __init__(self, scan_id, pathes, contents=None, recos=None): + def __init__(self, scan_id: int|None, pathes, contents=None, recos=None): """ Initialize a Dataset object. 
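+
+        Example:
+            A usage sketch; the paths are illustrative, and 'pathes' follows the
+            (root_path, scan_path) pair convention used by ScanObj:
+
+            >>> scan = PvScan(3, ('/path/to/study', '/path/to/study/3'))
+            >>> scan.avail  # list of reco ids found under 'pdata'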
@@ -95,13 +95,11 @@ def get_visu_pars(self, reco_id=None): elif 'visu_pars' in self.contents['files']: return getattr(self, 'visu_pars') elif len(self.avail): - recoobj = self.get_reco(self.avail[0]) - if 'visu_pars' not in recoobj.contents['files']: - raise FileNotFoundError - else: - return getattr(recoobj, 'visu_pars') - else: - raise FileNotFoundError + recoobjs = [self.get_reco(rid) for rid in self.avail] + for recoobj in recoobjs: + if 'visu_pars' in recoobj.contents['files']: + return getattr(recoobj, 'visu_pars') + raise FileNotFoundError @property def path(self): From 31f27dafd52b27dd2fff70d52dc95fb5d05999dd Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 09:03:53 -0400 Subject: [PATCH 30/38] [update] ScanInfo exposed in brkobj module --- brkraw/api/brkobj/__init__.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/brkraw/api/brkobj/__init__.py b/brkraw/api/brkobj/__init__.py index 093bafa..464667c 100644 --- a/brkraw/api/brkobj/__init__.py +++ b/brkraw/api/brkobj/__init__.py @@ -1,4 +1,4 @@ from .study import StudyObj -from .scan import ScanObj +from .scan import ScanObj, ScanInfo -__all__ = [StudyObj, ScanObj] \ No newline at end of file +__all__ = [StudyObj, ScanObj, ScanInfo] \ No newline at end of file From e68767b7e8627a1f0f59f43eabbce2227b11df26 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 10:33:05 -0400 Subject: [PATCH 31/38] [patch] pvfiles path method -> property --- brkraw/api/pvobj/pvfiles.py | 1 + 1 file changed, 1 insertion(+) diff --git a/brkraw/api/pvobj/pvfiles.py b/brkraw/api/pvobj/pvfiles.py index 939699e..23503ca 100644 --- a/brkraw/api/pvobj/pvfiles.py +++ b/brkraw/api/pvobj/pvfiles.py @@ -43,5 +43,6 @@ def get_visu_pars(self, _:None=None): """ Mock function of PvScan """ return getattr(self, 'visu_pars') + @property def path(self): return self._path From d4a4e77fcef50a463a59ed5d7714a03a2027b634 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 13:38:50 -0400 Subject: [PATCH 32/38] [update] several updates for tonifti module --- brkraw/api/analyzer/scaninfo.py | 5 +- brkraw/api/brkobj/study.py | 1 + brkraw/api/helper/cycle.py | 12 +-- brkraw/api/helper/protocol.py | 18 ++-- brkraw/app/tonifti/__init__.py | 81 ++++++++++++++++- brkraw/app/tonifti/base.py | 145 ++++++++++++++++++++++++++++++ brkraw/app/tonifti/brkraw.py | 153 ++++++++++++++++++++++++++++++++ brkraw/app/tonifti/converter.py | 16 ++++ brkraw/app/tonifti/header.py | 58 ++++++++++++ brkraw/app/tonifti/loader.py | 8 ++ brkraw/app/tonifti/main.py | 102 --------------------- brkraw/app/tonifti/pvfiles.py | 15 ++++ brkraw/app/tonifti/pvreco.py | 18 ++++ brkraw/app/tonifti/pvscan.py | 18 ++++ brkraw/app/tonifti/reco.py | 102 --------------------- 15 files changed, 529 insertions(+), 223 deletions(-) create mode 100644 brkraw/app/tonifti/base.py create mode 100644 brkraw/app/tonifti/brkraw.py create mode 100644 brkraw/app/tonifti/converter.py create mode 100644 brkraw/app/tonifti/header.py create mode 100644 brkraw/app/tonifti/loader.py delete mode 100644 brkraw/app/tonifti/main.py create mode 100644 brkraw/app/tonifti/pvfiles.py create mode 100644 brkraw/app/tonifti/pvreco.py create mode 100644 brkraw/app/tonifti/pvscan.py delete mode 100644 brkraw/app/tonifti/reco.py diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py index 515fdfc..43af2f9 100644 --- a/brkraw/api/analyzer/scaninfo.py +++ b/brkraw/api/analyzer/scaninfo.py @@ -1,4 +1,5 @@ from __future__ import annotations +from collections import 
OrderedDict from brkraw.api import helper from .base import BaseAnalyzer from typing import TYPE_CHECKING @@ -27,12 +28,12 @@ def _set_pars(self, pvobj: 'PvScan'|'PvReco'|'PvFiles', reco_id: int|None): try: vals = getattr(pvobj, p) except AttributeError: - vals = None + vals = OrderedDict() setattr(self, p, vals) try: visu_pars = pvobj.get_visu_pars(reco_id) except FileNotFoundError: - visu_pars = None + visu_pars = OrderedDict() setattr(self, 'visu_pars', visu_pars) def _parse_info(self): diff --git a/brkraw/api/brkobj/study.py b/brkraw/api/brkobj/study.py index 523537d..c63bf4e 100644 --- a/brkraw/api/brkobj/study.py +++ b/brkraw/api/brkobj/study.py @@ -30,6 +30,7 @@ def _parse_header(self) -> (Dict | None): def avail(self): return super().avail + @property def info(self): """output all analyzed information""" info = {'header': None, diff --git a/brkraw/api/helper/cycle.py b/brkraw/api/helper/cycle.py index f753ade..60a3dd9 100644 --- a/brkraw/api/helper/cycle.py +++ b/brkraw/api/helper/cycle.py @@ -21,16 +21,16 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): super().__init__() scan_time = analobj.visu_pars.get("VisuAcqScanTime") or 0 fg_info = analobj.get('info_frame_group') or FrameGroup(analobj).get_info() - fg_not_slice = [] + fg_cycle = [] if fg_info['type'] != None: - fg_not_slice.extend([fg_info['shape'][id] for id, fg in enumerate(fg_info['id']) - if not re.search('slice', fg, re.IGNORECASE)]) - self.num_frames = np.prod(fg_not_slice) if len(fg_not_slice) else 1 - self.time_step = (scan_time / self.num_frames) + fg_cycle.extend([fg_info['shape'][id] for id, fg in enumerate(fg_info['id']) \ + if re.search('cycle', fg, re.IGNORECASE)]) + self.num_cycles = fg_cycle.pop() if len(fg_cycle) else 1 + self.time_step = (scan_time / self.num_cycles) def get_info(self): return { - "num_frames": self.num_frames, + "num_cycles": self.num_cycles, "time_step": self.time_step, "unit": 'msec', 'warns': self.warns diff --git a/brkraw/api/helper/protocol.py b/brkraw/api/helper/protocol.py index e8bd64a..dcb4c43 100644 --- a/brkraw/api/helper/protocol.py +++ b/brkraw/api/helper/protocol.py @@ -17,15 +17,15 @@ def __init__(self, analobj: 'ScanInfoAnalyzer'): acqp = analobj.acqp if not acqp: self._warn("Failed to fetch all Protocol information because the 'acqp' file is missing from 'analobj'.") - self.sw_version = str(acqp.get('ACQ_sw_version')) if acqp else None - self.operator = acqp.get('ACQ_operator') if acqp else None - self.pulse_program = acqp.get('PULPROG') if acqp else None - self.nucleus = acqp.get('NUCLEUS') if acqp else None - self.protocol_name = acqp.get('ACQ_protocol_name') or acqp.get('ACQ_scan_name') if acqp else None - self.scan_method = acqp.get('ACQ_method') if acqp else None - self.subject_pos = acqp.get('ACQ_patient_pos') if acqp else None - self.institution = acqp.get('ACQ_institution') if acqp else None - self.device = acqp.get('ACQ_station') if acqp else None + self.sw_version = str(acqp.get('ACQ_sw_version')) + self.operator = acqp.get('ACQ_operator') + self.pulse_program = acqp.get('PULPROG') + self.nucleus = acqp.get('NUCLEUS') + self.protocol_name = acqp.get('ACQ_protocol_name') or acqp.get('ACQ_scan_name') + self.scan_method = acqp.get('ACQ_method') + self.subject_pos = acqp.get('ACQ_patient_pos') + self.institution = acqp.get('ACQ_institution') + self.device = acqp.get('ACQ_station') def get_info(self): return { diff --git a/brkraw/app/tonifti/__init__.py b/brkraw/app/tonifti/__init__.py index 61ec8db..c678c14 100644 --- a/brkraw/app/tonifti/__init__.py +++ 
b/brkraw/app/tonifti/__init__.py @@ -1,3 +1,80 @@ -from .main import BrkrawToNifti1 +""" +dependency: + bids, plugin +""" +import argparse +from brkraw import __version__ +from .loader import Loader -__all__ = [BrkrawToNifti1] \ No newline at end of file +__all__ = [Loader] + +def load(*args, **kwargs): + """Load data in Facade design pattern + """ + Loader() + +def main(): + """main script allows convert brkraw + provide list function of all available converting mode (including plugin) + """ + parser = argparse.ArgumentParser(prog='brk_tonifti', + description="BrkRaw command-line interface for NifTi conversion") + parser.add_argument("-v", "--version", action='version', version='%(prog)s v{}'.format(__version__)) + + subparsers = parser.add_subparsers(title='Sub-commands', + description='To run this command, you must specify one of the functions listed' + 'below next to the command. For more information on each function, ' + 'use -h next to the function name to call help document.', + help='description', + dest='function', + metavar='command') + + input_str = "input raw Bruker data" + input_dir_str = "input directory that contains multiple raw Bruker data" + output_dir_str = "output directory name" + output_fnm_str = "output filename" + bids_opt = "create a JSON file contains metadata based on BIDS recommendation" + + info = subparsers.add_parser("info", help='Prints out the information of the internal contents in Bruker raw data') + + scan = subparsers.add_parser("scan", help='Convert a single raw Bruker data into NifTi file(s)') + study = subparsers.add_parser("study", help="Convert All raw Bruker data located in the input directory") + dataset = subparsers.add_parser("dataset", help="Convert All raw Bruker data located in the input directory") + + # info + info.add_argument("input", help=input_str, type=str) + + # tonii + scan.add_argument("input", help=input_str, type=str) + scan.add_argument("-b", "--bids", help=bids_opt, action='store_true') + scan.add_argument("-o", "--output", help=output_fnm_str, type=str, default=False) + scan.add_argument("-s", "--scanid", help="Scan ID, option to specify a particular scan to convert.", type=str) + scan.add_argument("-r", "--recoid", help="RECO ID (default=1), " + "option to specify a particular reconstruction id to convert", + type=int, default=1) + scan.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \ + "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) + scan.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ + "the position variable can be defiend as _, " + \ + "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. 
Head_Supine)", type=str, default=None) + scan.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') + scan.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') + scan.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') + scan.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true', default=True) + + # tonii_all + dataset.add_argument("input", help=input_dir_str, type=str) + dataset.add_argument("-o", "--output", help=output_dir_str, type=str) + dataset.add_argument("-b", "--bids", help=bids_opt, action='store_true') + dataset.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \ + "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None) + dataset.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \ + "the position variable can be defiend as _, " + \ + "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None) + dataset.add_argument("--ignore-slope", help='remove slope value from header', action='store_true') + dataset.add_argument("--ignore-offset", help='remove offset value from header', action='store_true') + dataset.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true') + dataset.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true') + +if __name__ == '__main__': + main() \ No newline at end of file diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py new file mode 100644 index 0000000..9a94e5a --- /dev/null +++ b/brkraw/app/tonifti/base.py @@ -0,0 +1,145 @@ +from __future__ import annotations +from enum import Enum +import warnings +import numpy as np +from io import BufferedReader +from zipfile import ZipExtFile +from brkraw.api.brkobj import ScanObj, ScanInfo +from brkraw.api.analyzer import ScanInfoAnalyzer, DataArrayAnalyzer, AffineAnalyzer +from .header import Header +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from pathlib import Path + + +XYZT_UNITS = \ + dict(EPI=('mm', 'sec')) + + +class ScaleMode(Enum): + NONE = 0 + APPLY = 1 + HEADER = 2 + + +class BaseMethods: + info, fileobj = (None, None) + + def set_scale_mode(self, scale_mode:ScaleMode): + self.scale_mode = scale_mode + + def _set_info(self): + analysed = ScanInfoAnalyzer(self) + infoobj = ScanInfo() + + for attr_name in dir(analysed): + if 'info_' in attr_name: + attr_vals = getattr(analysed, attr_name) + setattr(infoobj, attr_name.replace('info_', ''), attr_vals) + if attr_vals and attr_vals['warns']: + infoobj.warns.extend(attr_vals['warns']) + self.info = infoobj + self.analysed = analysed + + @staticmethod + def get_dataobj(scanobj:'ScanInfo'|'ScanObj', + fileobj:'BufferedReader'|'ZipExtFile'|None = None, + reco_id:int|None = None, + scale_correction:bool = False): + data_dict = BaseMethods.get_data_dict(scanobj, fileobj, reco_id) + dataobj = data_dict['data_array'] + if scale_correction: + try: + dataobj = dataobj * data_dict['data_slope'] + data_dict['data_offset'] + except ValueError as e: + warnings.warn( + "Scale correction not applied. The 'slope' and 'offset' provided are not in a tested condition. 
" + "For further assistance, contact the developer via issue at: https://github.com/brkraw/brkraw.git", + UserWarning) + return dataobj + + @staticmethod + def get_affine(scanobj:'ScanInfo'|'ScanObj', reco_id:int|None = None, + subj_type:str|None=None, subj_position:str|None=None): + return BaseMethods.get_affine_dict(scanobj, reco_id, subj_type, subj_position)['affine'] + + @staticmethod + def get_data_dict(scanobj:'ScanInfo'|'ScanObj', + fileobj:'BufferedReader'|'ZipExtFile'|None = None, + reco_id:int|None = None): + if isinstance(scanobj, ScanObj): + data_info = scanobj.get_data_info(reco_id) + elif isinstance(scanobj, ScanInfo) and isinstance(scanobj, BufferedReader|ZipExtFile): + data_info = DataArrayAnalyzer(scanobj, fileobj) + else: + raise TypeError( + "Unsupported type for 'scanobj'. Expected 'scanobj' to be an instance of 'ScanObj' or " + "'ScanInfo' combined with either 'BufferedReader' or 'ZipExtFile'. Please check the type of 'scanobj' " + "and ensure it matches the expected conditions." + ) + axis_labels = data_info.shape_desc + dataarray = data_info.get_dataarray() + slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2 + if slice_axis != 2: + dataarray = np.swapaxes(dataarray, slice_axis, 2) + axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis] + return { + 'data_array': dataarray, + 'data_slope': data_info.slope, + 'data_offset': data_info.offset, + 'axis_labels': axis_labels + } + + @staticmethod + def get_affine_dict(scanobj:'ScanInfo'|'ScanObj', reco_id:int|None = None, + subj_type:str|None = None, subj_position:str|None = None): + if isinstance(scanobj, ScanObj): + affine_info = scanobj.get_affine_info(reco_id) + elif isinstance(scanobj, ScanInfo): + affine_info = AffineAnalyzer(scanobj) + else: + raise TypeError( + "Unsupported type for 'scanobj'. Expected 'scanobj' to be an instance of 'ScanObj' or 'ScanInfo'. " + "Please check the type of 'scanobj' and ensure it matches the expected conditions." 
+ ) + subj_type = subj_type or affine_info.subj_type + subj_position = subj_position or affine_info.subj_position + affine = affine_info.get_affine(subj_type, subj_position) + return { + "num_slicepacks": len(affine) if isinstance(affine, list) else 1, + "affine": affine, + "subj_type": subj_type, + "subj_position": subj_position + } + + @staticmethod + def get_bdata(analobj:'ScanInfoAnalyzer'): + """Extract, format, and return diffusion bval and bvec""" + bvals = np.array(analobj.method.get('PVM_DwEffBval')) + bvecs = np.array(analobj.method.get('PVM_DwGradVec').T) + # Correct for single b-vals + if np.size(bvals) < 2: + bvals = np.array([bvals]) + # Normalize bvecs + bvecs_axis = 0 + bvecs_L2_norm = np.atleast_1d(np.linalg.norm(bvecs, 2, bvecs_axis)) + bvecs_L2_norm[bvecs_L2_norm < 1e-15] = 1 + bvecs = bvecs / np.expand_dims(bvecs_L2_norm, bvecs_axis) + return bvals, bvecs + + @staticmethod + def get_bids_metadata(scaninfo:'ScanInfo', bids_recipe:'Path'|None=None): + print(isinstance(scaninfo, ScanInfo), bids_recipe) + + @staticmethod + def get_nifti1header(scaninfo:'ScanInfo', scale_mode:'ScaleMode'|None): + scale_mode = scale_mode or ScaleMode.HEADER + return Header(scaninfo, scale_mode).get() + + # @staticmethod + # def get_nifti1image(self, scan_id:int, reco_id:int|None=None, + # subj_type:str|None=None, subj_position:str|None=None, + # scale_mode:ScaleMode = ScaleMode.HEADER): + # smode = scale_mode if scale_mode == ScaleMode.APPLY else ScaleMode.NONE + # data_dict = self.get_dataobj(scan_id, reco_id, smode) + # affine_dict = self.get_affine(scan_id, reco_id, subj_type, subj_position) \ No newline at end of file diff --git a/brkraw/app/tonifti/brkraw.py b/brkraw/app/tonifti/brkraw.py new file mode 100644 index 0000000..d3ac6c0 --- /dev/null +++ b/brkraw/app/tonifti/brkraw.py @@ -0,0 +1,153 @@ +from __future__ import annotations +from brkraw.api.brkobj import StudyObj +from .base import BaseMethods, ScaleMode +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from pathlib import Path + + +class BrkrawToNifti(StudyObj, BaseMethods): + def __init__(self, path:'Path', scale_mode: 'ScaleMode'|None = None): + """_summary_ + + Args: + path (Path): _description_ + scale_mode (ScaleMode | None, optional): _description_. Defaults to None. + """ + + super().__init__(path) + if scale_mode: + self.set_scale_mode(scale_mode) + self._cache = {} + + def get_scan(self, scan_id:int): + """_summary_ + + Args: + scan_id (int): _description_ + + Returns: + _type_: _description_ + """ + if scan_id not in self._cache.keys(): + self._cache[scan_id] = super().get_scan(scan_id) + return self._cache[scan_id] + + def get_scan_analyzer(self, scan_id:int, reco_id:int|None=None): + """_summary_ + + Args: + scan_id (int): _description_ + reco_id (int | None, optional): _description_. Defaults to None. + + Returns: + _type_: _description_ + """ + return self.get_scan(scan_id).get_info(reco_id, get_analyzer=True) + + def get_affine(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None): + """_summary_ + + Args: + scan_id (int): _description_ + reco_id (int | None, optional): _description_. Defaults to None. + subj_type (str | None, optional): _description_. Defaults to None. + subj_position (str | None, optional): _description_. Defaults to None. 
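+
+        Example (study path and ids are hypothetical):
+            >>> tonifti = BrkrawToNifti('/path/to/study')
+            >>> affine = tonifti.get_affine(scan_id=3, reco_id=1)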
+ + Returns: + _type_: _description_ + """ + scanobj = self.get_scan(scan_id) + return super().get_affine(scanobj=scanobj, reco_id=reco_id, subj_type=subj_type, subj_position=subj_position) + + def get_dataobj(self, scan_id:int, reco_id:int|None=None, scale_mode:'ScaleMode'|None = None): + """_summary_ + + Args: + scan_id (int): _description_ + reco_id (int | None, optional): _description_. Defaults to None. + scale_mode (ScaleMode' | None, optional): _description_. Defaults to None. + + Raises: + ValueError: _description_ + + Returns: + _type_: _description_ + """ + scale_mode = scale_mode or self.scale_mode + if scale_mode == ScaleMode.HEADER: + raise ValueError("The 'HEADER' option for scale_mode is not supported in this context. Only 'NONE' or 'APPLY' options are available. " + "To use the 'HEADER' option, please switch to the 'get_nifti1image' method, which supports storing scales in the header.") + scanobj = self.get_scan(scan_id) + return super().get_dataobj(scanobj=scanobj, fileobj=None, reco_id=reco_id, scale_correction=bool(scale_mode)) + + def get_data_dict(self, scan_id:int, reco_id:int|None=None): + """_summary_ + + Args: + scan_id (int): _description_ + reco_id (int | None, optional): _description_. Defaults to None. + + Returns: + _type_: _description_ + """ + scanobj = self.get_scan(scan_id) + return super().get_data_dict(scanobj=scanobj, reco_id=reco_id) + + def get_affine_dict(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None): + """_summary_ + + Args: + scan_id (int): _description_ + reco_id (int | None, optional): _description_. Defaults to None. + subj_type (str | None, optional): _description_. Defaults to None. + subj_position (str | None, optional): _description_. Defaults to None. + + Returns: + _type_: _description_ + """ + scanobj = self.get_scan(scan_id) + return super().get_affine_dict(scanobj=scanobj, reco_id=reco_id, + subj_type=subj_type, subj_position=subj_position) + + def get_nifti1header(self, scan_id:int, reco_id:int|None=None, scale_mode:'ScaleMode'|None = None): + """_summary_ + + Args: + scan_id (int): _description_ + reco_id (int | None, optional): _description_. Defaults to None. + scale_mode (ScaleMode' | None, optional): _description_. Defaults to None. + + Returns: + _type_: _description_ + """ + scale_mode = scale_mode or self.scale_mode + scaninfo = self.get_scan(scan_id).get_info(reco_id) + return super().get_nifti1header(scaninfo, scale_mode).get() + + def get_bdata(self, scan_id:int): + """_summary_ + + Args: + scan_id (int): _description_ + + Returns: + _type_: _description_ + """ + analobj = self.get_scan_analyzer(scan_id) + return super().get_bdata(analobj) + + def get_bids_metadata(self, scan_id:int, reco_id:int|None=None, bids_recipe=None): + """_summary_ + + Args: + scan_id (int): _description_ + reco_id (int | None, optional): _description_. Defaults to None. + bids_recipe (_type_, optional): _description_. Defaults to None. 
+ + Returns: + _type_: _description_ + """ + analobj = self.get_scan_analyzer(scan_id, reco_id) + return super().get_bids_metadata(analobj, bids_recipe) + \ No newline at end of file diff --git a/brkraw/app/tonifti/converter.py b/brkraw/app/tonifti/converter.py new file mode 100644 index 0000000..a54dc17 --- /dev/null +++ b/brkraw/app/tonifti/converter.py @@ -0,0 +1,16 @@ +class Converter: + """ + Data converter to NifTi format, + provide variouse converting mode + the Default is use default + in case of plugin needed, search available plugin (by name of plugin) and run it + the plugin functionality will be implemented using modules in plugin app + + sordino2nii will be first example case of plugin + """ + def __init__(self): + pass + + def save_to(self, output_path): + pass + \ No newline at end of file diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py new file mode 100644 index 0000000..45b0592 --- /dev/null +++ b/brkraw/app/tonifti/header.py @@ -0,0 +1,58 @@ +from __future__ import annotations +import warnings +from nibabel.nifti1 import Nifti1Header +from brkraw.app.tonifti.base import ScaleMode +from typing import TYPE_CHECKING +if TYPE_CHECKING: + from brkraw.api.brkobj import ScanInfo + + +class Header: + def __init__(self, scaninfo:'ScanInfo', scale_mode:'ScaleMode' = ScaleMode.HEADER): + self.info = scaninfo + self.scale_mode = scale_mode + self.nifti1header = Nifti1Header() + self.nifti1header.default_x_flip = False + self._set_scale_params() + self._set_sliceorder() + self._set_time_step() + + def _set_sliceorder(self): + self.info.slicepack + slice_order_scheme = self.info.method.get("PVM_ObjOrderScheme") + if slice_order_scheme == 'User_defined_slice_scheme' or slice_order_scheme: + slice_code = 0 + elif slice_order_scheme == 'Sequential': + slice_code = 1 + elif slice_order_scheme == 'Reverse_sequential': + slice_code = 2 + elif slice_order_scheme == 'Interlaced': + slice_code = 3 + elif slice_order_scheme == 'Reverse_interlacesd': + slice_code = 4 + elif slice_order_scheme == 'Angiopraphy': + slice_code = 5 + else: + slice_code = 0 + + if slice_code == 0: + warnings.warn( + "Failed to identify compatible 'slice_code'. " + "Please use this header information with care in case slice timing correction is needed." 
+ ) + self.nifti1header['slice_code'] = slice_code + + def _set_time_step(self): + if self.info.cycle['num_cycle'] > 1: + time_step = self.info.cycle['time_step'] + self.nifti1header['pixdim'][4] = time_step + num_slices = self.info.slicepack['num_slices_each_pack'][0] + self.nifti1header['slice_duration'] = time_step / num_slices + + def _set_scale_params(self): + if self.scale_mode == ScaleMode.HEADER: + self.nifti1header['scl_slope'] = self.info.dataarray['2dseq_slope'] + self.nifti1header['scl_inter'] = self.info.dataarray['2dseq_offset'] + + def get(self): + return self.nifti1header \ No newline at end of file diff --git a/brkraw/app/tonifti/loader.py b/brkraw/app/tonifti/loader.py new file mode 100644 index 0000000..fb11dbf --- /dev/null +++ b/brkraw/app/tonifti/loader.py @@ -0,0 +1,8 @@ +from .brkraw import BrkrawToNifti +from .pvscan import PvScanToNifti +from .pvreco import PvRecoToNifti +from .pvfiles import PvFilesToNifti + +class Loader: + def __init__(self, *args, **kwargs): + pass \ No newline at end of file diff --git a/brkraw/app/tonifti/main.py b/brkraw/app/tonifti/main.py deleted file mode 100644 index c0fbb0d..0000000 --- a/brkraw/app/tonifti/main.py +++ /dev/null @@ -1,102 +0,0 @@ -import warnings -import numpy as np -from enum import Enum -from brkraw.api.brkobj import StudyObj - - -XYZT_UNITS = \ - dict(EPI=('mm', 'sec')) - - -class ScaleMode(Enum): - NONE = 0 - APPLY = 1 - HEADER = 2 - - -class BrkrawToNifti1(StudyObj): - def __init__(self, path): - super().__init__(path) - self._cache = {} - - def info(self): - pass - - def get_scan(self, scan_id:int): - if scan_id not in self._cache.keys(): - self._cache[scan_id] = super().get_scan(scan_id) - return self._cache[scan_id] - - def get_scan_info(self, scan_id:int, reco_id:int|None=None): - scanobj = self.get_scan(scan_id) - return scanobj.get_info(reco_id, get_analyzer=True) - - def get_affine(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None): - return self.get_affine_dict(scan_id, reco_id, subj_type, subj_position)['affine'] - - def get_dataobj(self, scan_id:int, reco_id:int|None=None, scale_mode:ScaleMode = ScaleMode.APPLY): - if scale_mode == ScaleMode.HEADER: - raise ValueError("The 'HEADER' option for scale_mode is not supported in this context. Only 'NONE' or 'APPLY' options are available. 
" - "To use the 'HEADER' option, please switch to the 'get_nifti1image' method, which supports storing scales in the header.") - data_dict = self.get_data_dict(scan_id, reco_id) - dataobj = data_dict['data_array'] - return dataobj - - def get_data_dict(self, scan_id:int, reco_id:int|None=None): - scanobj = self.get_scan(scan_id) - data_info = scanobj.get_data_info(reco_id) - axis_labels = data_info.shape_desc - dataarray = data_info.get_dataarray() - slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2 - if slice_axis != 2: - dataarray = np.swapaxes(dataarray, slice_axis, 2) - axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis] - return { - 'data_array': dataarray, - 'data_slope': data_info.slope, - 'data_offset': data_info.offset, - 'axis_labels': axis_labels - } - - def get_affine_dict(self, scan_id:int, reco_id:int|None=None, subj_type:str|None=None, subj_position:str|None=None): - scanobj = self.get_scan(scan_id) - affine_info = scanobj.get_affine_info(reco_id) - subj_type = subj_type or affine_info.subj_type - subj_position = subj_position or affine_info.subj_position - affine = affine_info.get_affine(subj_type, subj_position) - return { - "num_slicepacks": len(affine) if isinstance(affine, list) else 1, - "affine": affine, - "subj_type": subj_type, - "subj_position": subj_position - } - - def get_bids_metadata(self, scan_id:int, reco_id:int|None=None, bids_recipe=None): - pars = self.get_scan_info(scan_id, reco_id) - - def get_bdata(self, scan_id): - """Extract, format, and return diffusion bval and bvec""" - info = self.get_scan_info(scan_id) - bvals = np.array(info.method.get('PVM_DwEffBval')) - bvecs = np.array(info.method.get('PVM_DwGradVec').T) - # Correct for single b-vals - if np.size(bvals) < 2: - bvals = np.array([bvals]) - # Normalize bvecs - bvecs_axis = 0 - bvecs_L2_norm = np.atleast_1d(np.linalg.norm(bvecs, 2, bvecs_axis)) - bvecs_L2_norm[bvecs_L2_norm < 1e-15] = 1 - bvecs = bvecs / np.expand_dims(bvecs_L2_norm, bvecs_axis) - return bvals, bvecs - - def get_nifti1header(self, scan_id:int, reco_id:int|None=None): - pars = self.get_pars(scan_id, reco_id) - - def get_nifti1image(self, scan_id:int, reco_id:int|None=None, - subj_type:str|None=None, subj_position:str|None=None, - scale_mode:ScaleMode = ScaleMode.HEADER): - smode = scale_mode if scale_mode == ScaleMode.APPLY else ScaleMode.NONE - data_dict = self.get_dataobj(scan_id, reco_id, smode) - affine_dict = self.get_affine(scan_id, reco_id, subj_type, subj_position) - - \ No newline at end of file diff --git a/brkraw/app/tonifti/pvfiles.py b/brkraw/app/tonifti/pvfiles.py new file mode 100644 index 0000000..983e392 --- /dev/null +++ b/brkraw/app/tonifti/pvfiles.py @@ -0,0 +1,15 @@ +from pathlib import Path +from brkraw.api.pvobj import PvFiles +from .base import BaseMethods, ScaleMode + +class PvFilesToNifti(PvFiles, BaseMethods): + def __init__(self, *files): + """_summary_ + + Args: + data_path (str): path of '2dseq' file in reco_dir + pars_path (str): path of 'visu_pars' file in reco_dir + """ + super.__init__(*files) + self._set_info() + diff --git a/brkraw/app/tonifti/pvreco.py b/brkraw/app/tonifti/pvreco.py new file mode 100644 index 0000000..709b844 --- /dev/null +++ b/brkraw/app/tonifti/pvreco.py @@ -0,0 +1,18 @@ +import os +from pathlib import Path +from brkraw.api.pvobj import PvReco +from .base import BaseMethods, ScaleMode + + +class PvRecoToNifti(PvReco, BaseMethods): + def __init__(self, path): + """_summary_ + + Args: + data_path (str): path of '2dseq' file 
in reco_dir + pars_path (str): path of 'visu_pars' file in reco_dir + """ + rootpath, reco_path = os.path.split(path) + _, dirs, files = os.walk(path) + super.__init__(None, reco_path, (rootpath, reco_path), {'dirs':dirs, 'files':files}) + self._set_info() diff --git a/brkraw/app/tonifti/pvscan.py b/brkraw/app/tonifti/pvscan.py new file mode 100644 index 0000000..6b7f00f --- /dev/null +++ b/brkraw/app/tonifti/pvscan.py @@ -0,0 +1,18 @@ +import os +from pathlib import Path +from brkraw.api.pvobj import PvScan +from .base import BaseMethods, ScaleMode + + +class PvScanToNifti(PvScan, BaseMethods): + def __init__(self, path:'Path'): + """_summary_ + + Args: + data_path (str): path of '2dseq' file in reco_dir + pars_path (str): path of 'visu_pars' file in reco_dir + """ + rootpath, scan_path = os.path.split(path) + _, dirs, files = os.walk(path) + super.__init__(None, scan_path, (rootpath, scan_path), {'dirs':dirs, 'files':files}) + self._set_info() diff --git a/brkraw/app/tonifti/reco.py b/brkraw/app/tonifti/reco.py deleted file mode 100644 index e79d692..0000000 --- a/brkraw/app/tonifti/reco.py +++ /dev/null @@ -1,102 +0,0 @@ -import warnings -import numpy as np -from pathlib import Path -from brkraw.api.pvobj import Parameter -from brkraw.api.brkobj.scan import ScanInfo -from brkraw.api.analyzer import ScanInfoAnalyzer, DataArrayAnalyzer, AffineAnalyzer - - -class RecoToNifti: - def __init__(self, data_path:Path, visu_pars:Path, method:Path=None): - """_summary_ - - Args: - data_path (str): path of '2dseq' file in reco_dir - pars_path (str): path of 'visu_pars' file in reco_dir - """ - self._load_arr(data_path) - self._load_pars(visu_pars, method) - self._set_info() - - def is_recotonifti(self): - return True - - def _set_info(self): - analysed = ScanInfoAnalyzer(self) - infoobj = ScanInfo() - - for attr_name in dir(analysed): - if 'info_' in attr_name: - attr_vals = getattr(analysed, attr_name) - setattr(infoobj, attr_name.replace('info_', ''), attr_vals) - if attr_vals and attr_vals['warns']: - infoobj.warns.extend(attr_vals['warns']) - self.info = infoobj - self.analysed = analysed - - def _load_arr(self, data_path): - self.fileobj = open(data_path, 'rb') - - def _load_pars(self, visu_pars, method): - visu_str = self._open_as_string(visu_pars) - visu_obj = Parameter(visu_str, name='visu_pars') - if not len([k for k in visu_obj.keys() if 'visu' in k.lower()]): - raise TypeError("The loaded file is incompatible with the expected 'visu_pars' file. " - "Please verify that the file path correctly points to a 'visu_pars' file.") - self.visu_pars = visu_obj - if method: - method_str = self._open_as_string(method) - method_obj = Parameter(method_str, name='method') - if not len([k for k in method_obj.keys() if 'pvm_' in k.lower()]): - raise TypeError("The loaded file is incompatible with the expected 'method' file. " - "Please verify that the file path correctly points to a 'method' file.") - self.method = method_obj - else: - self.method = None - warnings.warn("The 'RecoToNifti' object did not receive an input argument for the 'method' file. " - "As a result, the affine matrix may be inaccurate. 
" - "Please consider providing the 'method' file as input if possible.") - - @staticmethod - def _open_as_fileobj(path: Path): - return open(path, 'rb') - - @classmethod - def _open_as_string(cls, path: Path): - return cls._open_as_fileobj(path).read().decode('UTF-8').split('\n') - - def get_data_dict(self): - data_info = DataArrayAnalyzer(self.analysed, self.fileobj) - axis_labels = data_info.shape_desc - dataarray = data_info.get_dataarray() - slice_axis = axis_labels.index('slice') if 'slice' in axis_labels else 2 - if slice_axis != 2: - dataarray = np.swapaxes(dataarray, slice_axis, 2) - axis_labels[slice_axis], axis_labels[2] = axis_labels[2], axis_labels[slice_axis] - return { - 'data_array': dataarray, - 'data_slope': data_info.slope, - 'data_offset': data_info.offset, - 'axis_labels': axis_labels - } - - def get_affine_dict(self, subj_type:str|None=None, subj_position:str|None=None): - affine_info = AffineAnalyzer(self.info) - subj_type = subj_type or affine_info.subj_type - subj_position = subj_position or affine_info.subj_position - affine = affine_info.get_affine(subj_type, subj_position) - return { - "num_slicepacks": len(affine) if isinstance(affine, list) else 1, - "affine": affine, - "subj_type": subj_type, - "subj_position": subj_position - } - - def get_dataobj(self, scale_correction=True): - data_dict = self.get_data_dict() - if scale_correction: - dataobj = data_dict['data_array'] * data_dict['data_slope'] + data_dict['data_offset'] - return dataobj - - def get_affine(self, subj_type:str|None=None, subj_position:str|None=None): - return self.get_affine_dict(subj_type, subj_position)['affine'] From 3cff2eb38a00f086832ea4c3b488fc8d6e942528 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 15:31:24 -0400 Subject: [PATCH 33/38] [update] minor update for tonifti (typing) --- brkraw/app/tonifti/converter.py | 2 ++ brkraw/app/tonifti/header.py | 8 ++++---- brkraw/app/tonifti/loader.py | 1 + brkraw/app/tonifti/pvfiles.py | 1 + brkraw/app/tonifti/pvreco.py | 5 +++-- brkraw/app/tonifti/pvscan.py | 1 + 6 files changed, 12 insertions(+), 6 deletions(-) diff --git a/brkraw/app/tonifti/converter.py b/brkraw/app/tonifti/converter.py index a54dc17..22cc79d 100644 --- a/brkraw/app/tonifti/converter.py +++ b/brkraw/app/tonifti/converter.py @@ -1,3 +1,5 @@ +from __future__ import annotations + class Converter: """ Data converter to NifTi format, diff --git a/brkraw/app/tonifti/header.py b/brkraw/app/tonifti/header.py index 45b0592..a9f3888 100644 --- a/brkraw/app/tonifti/header.py +++ b/brkraw/app/tonifti/header.py @@ -1,16 +1,16 @@ from __future__ import annotations import warnings from nibabel.nifti1 import Nifti1Header -from brkraw.app.tonifti.base import ScaleMode from typing import TYPE_CHECKING if TYPE_CHECKING: from brkraw.api.brkobj import ScanInfo + from .base import ScaleMode class Header: - def __init__(self, scaninfo:'ScanInfo', scale_mode:'ScaleMode' = ScaleMode.HEADER): + def __init__(self, scaninfo:'ScanInfo', scale_mode:'ScaleMode'|int): self.info = scaninfo - self.scale_mode = scale_mode + self.scale_mode = int(scale_mode) self.nifti1header = Nifti1Header() self.nifti1header.default_x_flip = False self._set_scale_params() @@ -50,7 +50,7 @@ def _set_time_step(self): self.nifti1header['slice_duration'] = time_step / num_slices def _set_scale_params(self): - if self.scale_mode == ScaleMode.HEADER: + if self.scale_mode == 2: self.nifti1header['scl_slope'] = self.info.dataarray['2dseq_slope'] self.nifti1header['scl_inter'] = self.info.dataarray['2dseq_offset'] 
diff --git a/brkraw/app/tonifti/loader.py b/brkraw/app/tonifti/loader.py index fb11dbf..539a070 100644 --- a/brkraw/app/tonifti/loader.py +++ b/brkraw/app/tonifti/loader.py @@ -1,3 +1,4 @@ +from __future__ import annotations from .brkraw import BrkrawToNifti from .pvscan import PvScanToNifti from .pvreco import PvRecoToNifti diff --git a/brkraw/app/tonifti/pvfiles.py b/brkraw/app/tonifti/pvfiles.py index 983e392..baf617e 100644 --- a/brkraw/app/tonifti/pvfiles.py +++ b/brkraw/app/tonifti/pvfiles.py @@ -1,3 +1,4 @@ +from __future__ import annotations from pathlib import Path from brkraw.api.pvobj import PvFiles from .base import BaseMethods, ScaleMode diff --git a/brkraw/app/tonifti/pvreco.py b/brkraw/app/tonifti/pvreco.py index 709b844..bbab6f9 100644 --- a/brkraw/app/tonifti/pvreco.py +++ b/brkraw/app/tonifti/pvreco.py @@ -1,11 +1,12 @@ +from __future__ import annotations import os from pathlib import Path from brkraw.api.pvobj import PvReco -from .base import BaseMethods, ScaleMode +from .base import BaseMethods class PvRecoToNifti(PvReco, BaseMethods): - def __init__(self, path): + def __init__(self, path: 'Path'): """_summary_ Args: diff --git a/brkraw/app/tonifti/pvscan.py b/brkraw/app/tonifti/pvscan.py index 6b7f00f..ffd939e 100644 --- a/brkraw/app/tonifti/pvscan.py +++ b/brkraw/app/tonifti/pvscan.py @@ -1,3 +1,4 @@ +from __future__ import annotations import os from pathlib import Path from brkraw.api.pvobj import PvScan From 5732f34e96723617e1533d5e766824b657b2be58 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 15:32:03 -0400 Subject: [PATCH 34/38] [update] scale_mode initiation --- brkraw/app/tonifti/base.py | 9 ++++++--- brkraw/app/tonifti/brkraw.py | 13 +++++-------- 2 files changed, 11 insertions(+), 11 deletions(-) diff --git a/brkraw/app/tonifti/base.py b/brkraw/app/tonifti/base.py index 9a94e5a..31ee8de 100644 --- a/brkraw/app/tonifti/base.py +++ b/brkraw/app/tonifti/base.py @@ -25,9 +25,12 @@ class ScaleMode(Enum): class BaseMethods: info, fileobj = (None, None) - def set_scale_mode(self, scale_mode:ScaleMode): - self.scale_mode = scale_mode - + def set_scale_mode(self, scale_mode:ScaleMode|None): + if scale_mode: + self.scale_mode = scale_mode + else: + self.scale_mode = ScaleMode.HEADER + def _set_info(self): analysed = ScanInfoAnalyzer(self) infoobj = ScanInfo() diff --git a/brkraw/app/tonifti/brkraw.py b/brkraw/app/tonifti/brkraw.py index d3ac6c0..db41080 100644 --- a/brkraw/app/tonifti/brkraw.py +++ b/brkraw/app/tonifti/brkraw.py @@ -16,8 +16,7 @@ def __init__(self, path:'Path', scale_mode: 'ScaleMode'|None = None): """ super().__init__(path) - if scale_mode: - self.set_scale_mode(scale_mode) + self.set_scale_mode(scale_mode) self._cache = {} def get_scan(self, scan_id:int): @@ -66,7 +65,7 @@ def get_dataobj(self, scan_id:int, reco_id:int|None=None, scale_mode:'ScaleMode' Args: scan_id (int): _description_ reco_id (int | None, optional): _description_. Defaults to None. - scale_mode (ScaleMode' | None, optional): _description_. Defaults to None. + scale_mode (ScaleMode; | None, optional): _description_. Defaults to None. Raises: ValueError: _description_ @@ -75,11 +74,9 @@ def get_dataobj(self, scan_id:int, reco_id:int|None=None, scale_mode:'ScaleMode' _type_: _description_ """ scale_mode = scale_mode or self.scale_mode - if scale_mode == ScaleMode.HEADER: - raise ValueError("The 'HEADER' option for scale_mode is not supported in this context. Only 'NONE' or 'APPLY' options are available. 
" - "To use the 'HEADER' option, please switch to the 'get_nifti1image' method, which supports storing scales in the header.") + scale_correction = False if scale_mode == ScaleMode.HEADER else True scanobj = self.get_scan(scan_id) - return super().get_dataobj(scanobj=scanobj, fileobj=None, reco_id=reco_id, scale_correction=bool(scale_mode)) + return super().get_dataobj(scanobj=scanobj, fileobj=None, reco_id=reco_id, scale_correction=scale_correction) def get_data_dict(self, scan_id:int, reco_id:int|None=None): """_summary_ @@ -116,7 +113,7 @@ def get_nifti1header(self, scan_id:int, reco_id:int|None=None, scale_mode:'Scale Args: scan_id (int): _description_ reco_id (int | None, optional): _description_. Defaults to None. - scale_mode (ScaleMode' | None, optional): _description_. Defaults to None. + scale_mode (ScaleMode | None, optional): _description_. Defaults to None. Returns: _type_: _description_ From cf8905145e76c151fabc2dbac8ee3070f8d27d69 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 15:32:33 -0400 Subject: [PATCH 35/38] [new feature] add get_fid and get_2dseq for pvobj --- brkraw/api/brkobj/scan.py | 6 +----- brkraw/api/pvobj/base.py | 35 +++++++++++++++++++++++++++++++++++ 2 files changed, 36 insertions(+), 5 deletions(-) diff --git a/brkraw/api/brkobj/scan.py b/brkraw/api/brkobj/scan.py index 2554dc4..3f4a7f9 100644 --- a/brkraw/api/brkobj/scan.py +++ b/brkraw/api/brkobj/scan.py @@ -55,11 +55,7 @@ def get_affine_info(self, reco_id:int|None = None): def get_data_info(self, reco_id: int|None = None): reco_id = reco_id or self.avail[0] recoobj = self.get_reco(reco_id) - datafiles = [f for f in recoobj._contents['files'] if f == '2dseq'] - if not len(datafiles): - raise FileNotFoundError("The required file '2dseq' does not exist. " - "Please check the dataset and ensure the file is in the expected location.") - fileobj = recoobj._open_as_fileobject(datafiles.pop()) + fileobj = recoobj.get_2dseq() info = self.info if hasattr(self, 'info') else self.get_info(self.reco_id) return DataArrayAnalyzer(info, fileobj) diff --git a/brkraw/api/pvobj/base.py b/brkraw/api/pvobj/base.py index ec2f330..7d36492 100644 --- a/brkraw/api/pvobj/base.py +++ b/brkraw/api/pvobj/base.py @@ -163,6 +163,41 @@ def __getattr__(self, key): def contents(self): return self._contents + def get_fid(self, scan_id:int|None = None): + try: + pvobj = self.get_scan(scan_id) if hasattr(self, 'get_scan') else self + except KeyError: + raise TypeError("Missing required argument: 'scan_id must be provided for {self.__class__.__name__}.") + fid_files = ['fid', 'rawdata.job0'] + for fid in ['fid', 'rawdata.job0']: + if fid in pvobj.contents['files']: + return getattr(pvobj, fid) + raise FileNotFoundError(f"The required file '{' or '.join(fid_files)}' does not exist. " + "Please check the dataset and ensure the file is in the expected location.") + + def get_2dseq(self, scan_id:int|None = None, reco_id:int|None = None): + try: + if scan_id and hasattr(self, 'get_scan'): + pvobj = self.get_scan(scan_id).get_reco(reco_id) + elif reco_id and hasattr(self, 'get_reco'): + pvobj = self.get_reco(reco_id) + else: + pvobj = self + except KeyError: + message = "Missing required argument: " + if hasattr(self, 'get_scan'): + message = f"{message} 'scan_id' and 'reco_id' " + elif hasattr(self, 'get_reco'): + message = f"{message} 'reco_id' " + message = f"{message} must be provided for {self.__class__.__name__}." 
+ raise TypeError(message) + try: + return getattr(pvobj, '2dseq') + except AttributeError: + raise FileNotFoundError("The required file '2dseq' does not exist. " + "Please check the dataset and ensure the file is in the expected location.") + + @staticmethod def _is_binary(fileobj, bytes=512): block = fileobj.read(bytes) From ad5d1c439a7dd637add1c936ce87e890973c1869 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 17:20:50 -0400 Subject: [PATCH 36/38] [update] debugging option for some module --- brkraw/api/analyzer/scaninfo.py | 13 +++++++++---- brkraw/api/brkobj/scan.py | 12 ++++++------ brkraw/api/brkobj/study.py | 4 ++-- 3 files changed, 17 insertions(+), 12 deletions(-) diff --git a/brkraw/api/analyzer/scaninfo.py b/brkraw/api/analyzer/scaninfo.py index 43af2f9..b126ba3 100644 --- a/brkraw/api/analyzer/scaninfo.py +++ b/brkraw/api/analyzer/scaninfo.py @@ -17,11 +17,16 @@ class ScanInfoAnalyzer(BaseAnalyzer): Raises: NotImplementedError: If an operation is not implemented. """ - def __init__(self, pvobj: 'PvScan'|'PvReco'|'PvFiles', reco_id:int|None = None): + def __init__(self, + pvobj: 'PvScan'|'PvReco'|'PvFiles', + reco_id:int|None = None, + debug:bool = False): + self._set_pars(pvobj, reco_id) - self.info_protocol = helper.Protocol(self).get_info() - if self.visu_pars: - self._parse_info() + if not debug: + self.info_protocol = helper.Protocol(self).get_info() + if self.visu_pars: + self._parse_info() def _set_pars(self, pvobj: 'PvScan'|'PvReco'|'PvFiles', reco_id: int|None): for p in ['acqp', 'method']: diff --git a/brkraw/api/brkobj/scan.py b/brkraw/api/brkobj/scan.py index 3f4a7f9..927f5b6 100644 --- a/brkraw/api/brkobj/scan.py +++ b/brkraw/api/brkobj/scan.py @@ -15,7 +15,7 @@ def num_warns(self): class ScanObj(PvScan): def __init__(self, pvscan: 'PvScan', reco_id: int|None = None, - loader_address: int|None = None, analyze: bool=True): + loader_address: int|None = None, debug: bool=False): super().__init__(pvscan._scan_id, (pvscan._rootpath, pvscan._path), pvscan._contents, @@ -24,17 +24,17 @@ def __init__(self, pvscan: 'PvScan', reco_id: int|None = None, self.reco_id = reco_id self._loader_address = loader_address self._pvscan_address = id(pvscan) - if analyze: - self.set_info() + self.is_debug = debug + self.set_info() def set_info(self): self.info = self.get_info(self.reco_id) - def get_info(self, reco_id, get_analyzer:bool=False): + def get_info(self, reco_id:int, get_analyzer:bool = False): infoobj = ScanInfo() - pvscan = self.retrieve_pvscan() - analysed = ScanInfoAnalyzer(pvscan, reco_id) + analysed = ScanInfoAnalyzer(pvscan, reco_id, self.is_debug) + if get_analyzer: return analysed for attr_name in dir(analysed): diff --git a/brkraw/api/brkobj/study.py b/brkraw/api/brkobj/study.py index c63bf4e..0c83340 100644 --- a/brkraw/api/brkobj/study.py +++ b/brkraw/api/brkobj/study.py @@ -8,13 +8,13 @@ def __init__(self, path): super().__init__(path) self._parse_header() - def get_scan(self, scan_id, reco_id=None, analyze=True): + def get_scan(self, scan_id, reco_id=None, debug=False): """ Get a scan object by scan ID. 
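+
+        Example (the study path is illustrative):
+            >>> study = StudyObj('/path/to/study')
+            >>> scan = study.get_scan(3, debug=False)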
""" pvscan = super().get_scan(scan_id) return ScanObj(pvscan=pvscan, reco_id=reco_id, - loader_address=id(self), analyze=analyze) + loader_address=id(self), debug=debug) def _parse_header(self) -> (Dict | None): if not self.contents or 'subject' not in self.contents['files']: From 504c6ee6dea186f65c25dd169d8e158b96e8c0dc Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 17:21:30 -0400 Subject: [PATCH 37/38] [update] gitignore for internal test script _test*.py --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index dbf4294..e8825c8 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,6 @@ build *.egg-info/* .DS_Store tests/tutorials -_test.py +_test*.py _*.ipynb _*.log \ No newline at end of file From 92a087aaf90160cbe3ea9e048bfada564eee05f8 Mon Sep 17 00:00:00 2001 From: dvm-shlee Date: Sun, 14 Apr 2024 20:13:13 -0400 Subject: [PATCH 38/38] [update] temporary removed functionality --- brkraw/config.py | 16 ++-------------- 1 file changed, 2 insertions(+), 14 deletions(-) diff --git a/brkraw/config.py b/brkraw/config.py index ea51696..545d23e 100644 --- a/brkraw/config.py +++ b/brkraw/config.py @@ -1,5 +1,6 @@ import toml from pathlib import Path +from brkraw import __version__ class ConfigManager: """ @@ -45,20 +46,7 @@ def load_or_create_config(self): """ if not self.config_file.exists(): default_config = { - 'spec': { - 'pvdataset': { - 'binary_files': [], - 'parameter_files': ['subject', 'ResultState', 'AdjStatePerStudy', 'study.MR'] - }, - 'pvscan': { - 'binary_files': ['fid', 'rawdata.job0'], - 'parameter_files': ['method', 'acqp', 'configscan', 'visu_pars', 'AdjStatePerScan'] - }, - 'pvreco': { - 'binary_files': ['2dseq'], - 'parameter_files': ['reco', 'visu_pars', 'procs', 'methreco', 'id'] - } - } + 'version': __version__ } with open(self.config_file, 'w') as f: toml.dump(default_config, f)