diff --git a/.dev_scripts/covignore.cfg b/.dev_scripts/covignore.cfg index 64e01e9..eec26a6 100644 --- a/.dev_scripts/covignore.cfg +++ b/.dev_scripts/covignore.cfg @@ -4,3 +4,4 @@ # .*/utils.py .*/__init__.py +models/ diff --git a/.dev_scripts/diff_coverage_test.sh b/.dev_scripts/diff_coverage_test.sh index e4c6778..83b3fa5 100644 --- a/.dev_scripts/diff_coverage_test.sh +++ b/.dev_scripts/diff_coverage_test.sh @@ -35,7 +35,7 @@ done if [ ! -z "${PY_FILES}" ] then if [ "$REUSE_COVERAGE_REPORT" == "0" ]; then - coverage run --branch --source embodiedscan -m pytest tests/ + coverage run --branch --source mmscan -m pytest tests/ fi coverage report --fail-under 80 -m $PY_FILES interrogate -v --ignore-init-method --ignore-module --ignore-nested-functions --ignore-magic --ignore-regex "__repr__" --fail-under 95 $PY_FILES diff --git a/.dev_scripts/linter.sh b/.dev_scripts/linter.sh index 15fba23..0f1e819 100644 --- a/.dev_scripts/linter.sh +++ b/.dev_scripts/linter.sh @@ -1,3 +1,3 @@ -yapf -r -i embodiedscan/ configs/ tests/ tools/ -isort embodiedscan/ configs/ tests/ tools/ +yapf -r -i mmscan/ data_preparation/ +isort mmscan/ data_preparation/ flake8 . diff --git a/.github/workflows/lint.yml b/.github/workflows/lint.yml index 3636c47..06a3b8d 100644 --- a/.github/workflows/lint.yml +++ b/.github/workflows/lint.yml @@ -24,4 +24,4 @@ jobs: - name: Check docstring coverage run: | pip install interrogate - interrogate -v --ignore-init-method --ignore-magic --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 90 embodiedscan + interrogate -v --ignore-init-method --ignore-magic --ignore-module --ignore-nested-functions --ignore-regex "__repr__" --fail-under 90 mmscan diff --git a/.github/workflows/merge_stage_test.yml b/.github/workflows/merge_stage_test.yml index 432d4b0..b4b551f 100644 --- a/.github/workflows/merge_stage_test.yml +++ b/.github/workflows/merge_stage_test.yml @@ -38,11 +38,11 @@ jobs: apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub - name: Install system dependencies run: apt-get update && apt-get install -y git ffmpeg libturbojpeg - - name: Install dependencies and EmbodiedScan + - name: Install dependencies and MMScan run: python install.py all - name: Run unittests and generate coverage report run: | - coverage run --branch --source embodiedscan -m pytest tests + coverage run --branch --source mmscan -m pytest tests coverage xml coverage report -m @@ -67,10 +67,10 @@ jobs: apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub - name: Install system dependencies run: apt-get update && apt-get install -y git ffmpeg libturbojpeg - - name: Install dependencies and EmbodiedScan + - name: Install dependencies and MMScan run: python install.py all - name: Run unittests and generate coverage report run: | - coverage run --branch --source embodiedscan -m pytest tests + coverage run --branch --source mmscan -m pytest tests coverage xml coverage report -m diff --git a/.github/workflows/pr_stage_test.yml b/.github/workflows/pr_stage_test.yml index ed033b6..eb6633a 100644 --- a/.github/workflows/pr_stage_test.yml +++ b/.github/workflows/pr_stage_test.yml @@ -34,10 +34,10 @@ jobs: apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/machine-learning/repos/ubuntu1804/x86_64/7fa2af80.pub - name: Install system dependencies run: apt-get update && apt-get install -y ffmpeg libsm6 libxext6 git ninja-build 
libglib2.0-0 libsm6 libxrender-dev libxext6 - - name: Install dependencies and EmbodiedScan + - name: Install dependencies and MMScan run: python install.py all - name: Run unittests and generate coverage report run: | - coverage run --branch --source embodiedscan -m pytest tests/ + coverage run --branch --source mmscan -m pytest tests/ coverage xml coverage report -m diff --git a/.github/workflows/test_mim.yml b/.github/workflows/test_mim.yml index b8522ea..20095f0 100644 --- a/.github/workflows/test_mim.yml +++ b/.github/workflows/test_mim.yml @@ -41,4 +41,4 @@ jobs: - name: Build and install run: rm -rf .eggs && mim install -e . - name: test commands of mim - run: mim search embodiedscan + run: mim search mmscan diff --git a/.gitignore b/.gitignore index fb239ba..272a32b 100644 --- a/.gitignore +++ b/.gitignore @@ -14,8 +14,7 @@ dist/ downloads/ eggs/ .eggs/ -lib/ -lib64/ + parts/ sdist/ var/ @@ -113,7 +112,6 @@ venv.bak/ # demo *.jpg -*.png *.obj *.ply demo/data/* @@ -127,7 +125,7 @@ data/3rscan data/matterport3d data/arkitscenes data/*.pkl -data/*.json + exps/ todo.md @@ -140,4 +138,3 @@ tools/*.sh # test submission results *.pkl -*.json diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 81b2d9a..24ee153 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -3,6 +3,13 @@ repos: rev: 5.0.4 hooks: - id: flake8 + exclude: '^models/.*' + # - repo: https://github.com/ambv/black + # rev: 23.9.1 + # hooks: + # - id: black + # args: [--line-length=79] + # exclude: '^models/.*' - repo: https://github.com/PyCQA/isort rev: 5.11.5 hooks: diff --git a/README.md b/README.md index 49f145d..a852d5b 100644 --- a/README.md +++ b/README.md @@ -1,332 +1,311 @@

-

EmbodiedScan: A Holistic Multi-Modal 3D Perception Suite Towards Embodied AI

+

MMScan: A Multi-Modal 3D Scene Dataset with Hierarchical Grounded Language Annotations

-This repository contains EmbodiedScan-series works for holistic multi-modal 3D perception, currently including [EmbodiedScan](https://tai-wang.github.io/embodiedscan/) & [MMScan](https://tai-wang.github.io/mmscan/). +

+

-
+[![arXiv](https://img.shields.io/badge/arXiv-2312.16170-blue)](https://arxiv.org/abs/2312.16170)
+[![](https://img.shields.io/badge/Paper-%F0%9F%93%96-blue)](./assets/2024_NeurIPS_MMScan_Camera_Ready.pdf)
+[![](https://img.shields.io/badge/Project-%F0%9F%9A%80-blue)](https://tai-wang.github.io/mmscan)

-## 🤖 [Demo](https://tai-wang.github.io/embodiedscan)
-
+## 🤖 [Demo](https://tai-wang.github.io/mmscan)

-[![demo](assets/demo_fig.png "demo")](https://tai-wang.github.io/embodiedscan)
+[![demo](assets/demo.png "demo")](https://tai-wang.github.io/mmscan)

## 📋 Contents

1. [About](#-about)
-2. [News](#-news)
-3. [Getting Started](#-getting-started)
-4. [Model and Benchmark](#-model-and-benchmark)
-5. [TODO List](#-todo-list)
-6. [Citation](#-citation)
-7. [License](#-license)
-8. [Acknowledgements](#-acknowledgements)
+2. [Getting Started](#-getting-started)
+3. [MMScan API Tutorial](#-mmscan-api-tutorial)
+4. [TODO List](#-todo-list)

## 🏠 About
- Dialogue_Teaser + Dialogue_Teaser
-In the realm of computer vision and robotics, embodied agents are expected to explore their environment and carry out human instructions. -This necessitates the ability to fully understand 3D scenes given their first-person observations and contextualize them into language for interaction. -However, traditional research focuses more on scene-level input and output setups from a global view. -To address the gap, we introduce EmbodiedScan, a multi-modal, ego-centric 3D perception dataset and benchmark for holistic 3D scene understanding. -It encompasses over 5k scans encapsulating 1M ego-centric RGB-D views, 1M language prompts, 160k 3D-oriented boxes spanning over 760 categories, some of which partially align with LVIS, and dense semantic occupancy with 80 common categories. -Building upon this database, we introduce a baseline framework named Embodied Perceptron. It is capable of processing an arbitrary number of multi-modal inputs and demonstrates remarkable 3D perception capabilities, both within the two series of benchmarks we set up, i.e., fundamental 3D perception tasks and language-grounded tasks, and in the wild. - -## 🔥 News -- \[2024-09\] We are pleased to announce the release of EmbodiedScan v2 beta, with original annotations on newly added ~5k scans from ARKitScenes and the beta version of MMScan's annotations on the original 5k scans. Fill in the [form](https://docs.google.com/forms/d/e/1FAIpQLScUXEDTksGiqHZp31j7Zp7zlCNV7p_08uViwP_Nbzfn3g6hhw/viewform) to apply for downloading. Welcome for any feedback! -- \[2024-08\] We preliminarily release the [sample data](https://drive.google.com/file/d/1Y1_LOE35NpsnkneYElvNwuuR6-OAbwPm/view?usp=sharing) of [MMScan](https://tai-wang.github.io/mmscan/) and the full release will be ready with ARKitScenes' annotations this month, which will be announced via emails to the community. Please stay tuned! -- \[2024-06\] The report of our follow-up work with the most-ever hierarchical grounded language annotations, [MMScan](https://tai-wang.github.io/mmscan/), has been released. Welcome to talk with us about EmbodiedScan and MMScan at Seattle, CVPR 2024! -- \[2024-04\] We release all the baselines with pretrained models and logs. Welcome to try and play with them on our demo data! Note that we rename some keys in the multi-view 3D detection and visual grounding model. Please re-download the pretrained models if you just use our code for inference. -- \[2024-03\] The challenge test server is also online [here](https://huggingface.co/spaces/AGC2024/visual-grounding-2024). Looking forward to your strong submissions! -- \[2024-03\] We first release the data and baselines for the challenge. Please fill in the [form](https://docs.google.com/forms/d/e/1FAIpQLScUXEDTksGiqHZp31j7Zp7zlCNV7p_08uViwP_Nbzfn3g6hhw/viewform?usp=sf_link) to apply for downloading the data and try our baselines. Welcome any feedback! -- \[2024-02\] We will co-organize [Autonomous Grand Challenge](https://opendrivelab.com/challenge2024/) in CVPR 2024. Welcome to try the Multi-View 3D Visual Grounding track! We will release more details about the challenge with the baseline after the Chinese New Year. -- \[2023-12\] We release the [paper](./assets/EmbodiedScan.pdf) of EmbodiedScan. Please check the [webpage](https://tai-wang.github.io/embodiedscan) and view our demos! 
-
-## 📚 Getting Started
-
-### Installation
-
-We test our codes under the following environment:
-
-- Ubuntu 20.04
-- NVIDIA Driver: 525.147.05
-- CUDA 12.0
-- Python 3.8.18
-- PyTorch 1.11.0+cu113
-- PyTorch3D 0.7.2
-1. Clone this repository.
+With the emergence of LLMs and their integration with other data modalities,
+multi-modal 3D perception has attracted increasing attention due to its connection to the
+physical world and is making rapid progress. However, limited by existing datasets,
+previous works mainly focus on understanding object properties or inter-object
+spatial relationships in a 3D scene. To tackle this problem, this paper builds the
+largest-ever multi-modal 3D scene dataset and benchmark with hierarchical
+grounded language annotations, MMScan. It is constructed based on a top-down
+logic, from region to object level and from a single target to inter-target relationships,
+covering holistic aspects of spatial and attribute understanding. The overall
+pipeline incorporates powerful VLMs via carefully designed prompts to initialize
+the annotations efficiently and further involves humans' correction in the loop to
+ensure the annotations are natural, correct, and comprehensive. Built upon existing
+3D scanning data, the resulting multi-modal 3D dataset encompasses 1.4M
+meta-annotated captions on 109k objects and 7.7k regions as well as over 3.04M
+diverse samples for 3D visual grounding and question-answering benchmarks. We
+evaluate representative baselines on our benchmarks, analyze their capabilities in
+different aspects, and showcase the key problems to be addressed in the future.
+Furthermore, we use this high-quality dataset to train state-of-the-art 3D visual
+grounding models and LLMs and obtain remarkable performance improvements on both
+existing benchmarks and in-the-wild evaluation.
+
+## 🚀 Getting Started
-```bash
-git clone https://github.com/OpenRobotLab/EmbodiedScan.git
-cd EmbodiedScan
-```
+### Installation
-2. Create an environment and install PyTorch.
+1. Clone the GitHub repo.
-```bash
-conda create -n embodiedscan python=3.8 -y # pytorch3d needs python>3.7
-conda activate embodiedscan
-# Install PyTorch, for example, install PyTorch 1.11.0 for CUDA 11.3
-# For more information, please refer to https://pytorch.org/get-started/locally/
-conda install pytorch==1.11.0 torchvision==0.12.0 torchaudio==0.11.0 cudatoolkit=11.3 -c pytorch
-```
+   ```shell
+   git clone git@github.com:rbler1234/MMScan.git
+   cd MMScan
+   ```
-3. Install EmbodiedScan.
+2. Install requirements.
-```bash
-# We plan to make EmbodiedScan easier to install by "pip install EmbodiedScan".
-# Please stay tuned for the future official release.
-# Make sure you are under ./EmbodiedScan/
-# This script will install the dependencies and EmbodiedScan package automatically.
-# use [python install.py run] to install only the execution dependencies
-# use [python install.py visual] to install only the visualization dependencies
-python install.py all # install all the dependencies
-```
+   Your environment needs to include Python version 3.8 or higher.
**Note:** The automatic installation script make each step a subprocess and the related messages are only printed when the subprocess is finished or killed. Therefore, it is normal to seemingly hang when installing heavier packages, such as Mink Engine and PyTorch3D.
+   ```shell
+   conda activate your_env_name
+   python install.py all/VG/QA
+   ```
BTW, from our experience, it is easier to encounter problems when installing these two packages.
Feel free to post your questions or suggestions during the installation procedure.
+   Use `"all"` to install all components and specify `"VG"` or `"QA"` if you only need to install the components for Visual Grounding or Question Answering, respectively.

### Data Preparation

-Please refer to the [guide](data/README.md) for downloading and organization.
+1. Download the EmbodiedScan and MMScan annotations. (Fill in the [form](https://docs.google.com/forms/d/e/1FAIpQLScUXEDTksGiqHZp31j7Zp7zlCNV7p_08uViwP_Nbzfn3g6hhw/viewform) to apply for downloading.)
-### Tutorial
+   Create a folder `mmscan_data/` and then unzip the files. For the first zip file, put `embodiedscan` under `mmscan_data/embodiedscan_split` and rename it to `embodiedscan-v1`. For the second zip file, put `MMScan-beta-release` under `mmscan_data/MMScan-beta-release` and `embodiedscan-v2` under `mmscan_data/embodiedscan_split`.
-We provide a simple tutorial [here](https://github.com/OpenRobotLab/EmbodiedScan/blob/main/embodiedscan/tutorial.ipynb) as a guideline for the basic analysis and visualization of our dataset. Welcome to try and post your suggestions!
+   The directory structure should be as below:
-### Demo Inference
+   ```
+   mmscan_data
+   ├── embodiedscan_split
+   │   ├── embodiedscan-v1/  # EmbodiedScan v1 data in 'embodiedscan.zip'
+   │   ├── embodiedscan-v2/  # EmbodiedScan v2 data in 'embodiedscan-v2-beta.zip'
+   ├── MMScan-beta-release   # MMScan beta data in 'embodiedscan-v2-beta.zip'
+   ```
-We provide a demo for running EmbodiedScan's model on a sample scan. Please download the raw data from [Google Drive](https://drive.google.com/file/d/1nXIbH56TmIoEVv1AML7mZS0szTR5HgNC/view?usp=sharing) or [BaiduYun](https://pan.baidu.com/s/1GK9Z4M-VbRSMWErB39QGpg?pwd=v5w1) and refer to the [notebook](demo/demo.ipynb) for more details.
+2. Prepare the point cloud files.
-## 📦 Model and Benchmark
+   Please refer to the [guide](data_preparation/README.md).
-### Model Overview
+## 👓 MMScan API Tutorial
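As a quick orientation, the following is a minimal end-to-end sketch that wires the dataset tool to the visual grounding evaluator; both are documented in detail in the sections below. It is only a sketch under assumptions: `my_model` is a hypothetical stand-in for your own model, the task name `'MMScan-VG'` is inferred by analogy with `"MMScan-QA"`, and the ground-truth boxes are assumed to be derivable from each sample.

```python
import mmscan.MMScan as MMScan_dataset
import mmscan.VisualGroundingEvaluator as MMScan_VG_evaluator

# Load the validation split for visual grounding
# (task name assumed by analogy with "MMScan-QA").
my_dataset = MMScan_dataset(split='val', task='MMScan-VG')
my_evaluator = MMScan_VG_evaluator(show_results=True)

for index in range(len(my_dataset)):
    sample = my_dataset[index]
    pred = my_model(sample)  # hypothetical model returning scores and 9-DoF boxes
    my_evaluator.update([{
        'pred_scores': pred['scores'],     # (num_pred, 1)
        'pred_bboxes': pred['bboxes'],     # (num_pred, 9)
        'gt_bboxes': sample['gt_bboxes'],  # assumption: ground truth taken from the sample
        'subclass': sample['sub_class'],
        'index': index,
    }])

metric_dict = my_evaluator.start_evaluation()
my_evaluator.reset()
```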

- -

-Embodied Perceptron accepts RGB-D sequence with any number of views along with texts as multi-modal input. It uses classical encoders to extract features for each modality and adopts dense and isomorphic sparse fusion with corresponding decoders for different predictions. The 3D features integrated with the text feature can be further used for language-grounded understanding.
-
-
+```python
+import mmscan
-### Training and Evaluation
+# (1) The dataset tool
+import mmscan.MMScan as MMScan_dataset
-We provide configs for different tasks [here](configs/) and you can run the train and test script in the [tools folder](tools/) for training and inference.
-For example, to train a multi-view 3D detection model with pytorch, just run:
+# (2) The evaluator tool ('VisualGroundingEvaluator', 'QuestionAnsweringEvaluator', 'GPTEvaluator')
+import mmscan.VisualGroundingEvaluator as MMScan_VG_evaluator
-```bash
-# Single GPU training
-python tools/train.py configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py --work-dir=work_dirs/mv-3ddet
+import mmscan.QuestionAnsweringEvaluator as MMScan_QA_evaluator
-# Multiple GPU training
-python tools/train.py configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py --work-dir=work_dirs/mv-3ddet --launcher="pytorch"
+import mmscan.GPTEvaluator as MMScan_GPT_evaluator
```
-Or on the cluster with multiple machines, run the script with the slurm launcher following the sample script provided [here](tools/mv-grounding.sh).
+### MMScan Dataset
-NOTE: To run the multi-view 3D grounding experiments, please first download the 3D detection pretrained model to accelerate its training procedure. After downloading the detection checkpoint, please check the path used in the config, for example, the `load_from` [here](https://github.com/OpenRobotLab/EmbodiedScan/blob/main/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py#L210), is correct.
+The dataset tool in MMScan allows seamless access to data required for various tasks within MMScan.
-To inference and evaluate the model (e.g., the checkpoint `work_dirs/mv-3ddet/epoch_12.pth`), just run the test script:
+#### Usage
-```bash
-# Single GPU testing
-python tools/test.py configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py work_dirs/mv-3ddet/epoch_12.pth
+Initialize the dataset for a specific task with:
-# Multiple GPU testing
-python tools/test.py configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py work_dirs/mv-3ddet/epoch_12.pth --launcher="pytorch"
+```python
+my_dataset = MMScan_dataset(split='train', task="MMScan-QA", ratio=1.0)
+# Access a specific sample
+print(my_dataset[index])
```
-### Using Visualizer during inference
#### Data Access
-We provide EmbodiedScanBaseVisualizer to visualize the output of models during inference. Please refer to the [guide](embodiedscan/visualizer/README.md) for detail.
Each dataset item is a dictionary containing key elements:
-### Inference and Submit your Results
(1) 3D Modality
-We preliminarily support format-only inference for multi-view 3D visual grounding. To achieve format-only inference during test, just set `format_only=True` in `test_evaluator` in the corresponding config like [here](https://github.com/OpenRobotLab/EmbodiedScan/blob/main/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py#L183).
Then just run the test script like:
-```bash
-python tools/test.py configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py work_dirs/mv-grounding/epoch_12.pth --launcher="pytorch"
-```
+- **"ori_pcds"** (tuple\[tensor\]): Raw point cloud data from the `.pth` file.
+- **"pcds"** (np.ndarray): Point cloud data with shape \[n_points, 6\] (xyz+rgb).
+- **"instance_labels"** (np.ndarray): Instance IDs for each point.
+- **"class_labels"** (np.ndarray): Class IDs for each point.
+- **"bboxes"** (dict): Bounding boxes in the scan.
-The prediction file will be saved to `./test_results.json` in the current directory.
-You can also set the `result_dir` in `test_evaluator` to specify the directory to save the result file.
(2) Language Modality
-Finally, to pack the prediction file into the submission format, please modify the script `tools/submit_results.py` according to your team information and saving paths, and run:
+- **"sub_class"**: Sample category.
+- **"ID"**: Unique sample ID.
+- **"scan_id"**: Corresponding scan ID.
+- **--------------For Visual Grounding Task**
+- **"target_id"** (list\[int\]): IDs of target objects.
+- **"text"** (str): Grounding text.
+- **"target"** (list\[str\]): Types of target objects.
+- **"anchors"** (list\[str\]): Types of anchor objects.
+- **"anchor_ids"** (list\[int\]): IDs of anchor objects.
+- **"tokens_positive"** (dict): Position indices of mentioned objects in the text.
+- **--------------For Question Answering Task**
+- **"question"** (str): The question text.
+- **"answers"** (list\[str\]): List of possible answers.
+- **"object_ids"** (list\[int\]): Object IDs referenced in the question.
+- **"object_names"** (list\[str\]): Types of referenced objects.
+- **"input_bboxes_id"** (list\[int\]): IDs of input bounding boxes.
+- **"input_bboxes"** (list\[np.ndarray\]): Input bounding boxes, 9 DoF.
-```bash
-python tools/submit_results.py
-```
(3) 2D Modality
-Then you can submit the resulting pkl file to the test server and wait for the lottery :)
+- **'img_path'** (str): Path to RGB image.
+- **'depth_img_path'** (str): Path to depth image.
+- **'intrinsic'** (np.ndarray): Camera intrinsic parameters for RGB images.
+- **'depth_intrinsic'** (np.ndarray): Camera intrinsic parameters for depth images.
+- **'extrinsic'** (np.ndarray): Camera extrinsic parameters.
+- **'visible_instance_id'** (list): IDs of visible objects in the image.
-We also provide a sample script `tools/eval_script.py` for evaluating the submission file and you can check it by yourself to ensure your submitted file has the correct format.
### MMScan Evaluator
-### Benchmark
Our evaluation tool is designed to streamline the assessment of model outputs for the MMScan task, providing essential metrics to gauge model performance effectively.
-We preliminarily provide several baseline results here with their logs and pretrained models.
#### 1. Visual Grounding Evaluator
-Note that the performance is a little different from the results provided in the paper because we re-split the training set as the released training and validation set while keeping the original validation set as the test set for the public benchmark.
For the visual grounding task, our evaluator computes multiple metrics including AP (Average Precision), AR (Average Recall), AP_C, AR_C, and gtop-k:
-#### Multi-View 3D Detection
- **AP and AR**: These metrics treat each sample as an individual category when computing precision and recall.
+- **AP_C and AR_C**: Variants of AP and AR that group samples of the same subclass together and compute the metrics over each group.
+- **gtop-k**: A generalized version of the traditional top-k metric.
-| Method | Input | AP@0.25 | AR@0.25 | AP@0.5 | AR@0.5 | Download |
-|:------:|:-----:|:-------:|:-------:|:------:|:------:|:------:|
-| [Baseline](configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py) | RGB-D | 15.22 | 52.23 | 8.13 | 26.66 | [Model](https://download.openmmlab.com/mim-example/embodiedscan/mv-3ddet.pth), [Log](https://download.openmmlab.com/mim-example/embodiedscan/mv-3ddet.log) |
+Below is an example of how to utilize the Visual Grounding Evaluator:
-#### Continuous 3D Detection
+```python
+# Initialize the evaluator with show_results enabled to display results
+my_evaluator = MMScan_VG_evaluator(show_results=True)
-| Method | Input | AP@0.25 | AR@0.25 | AP@0.5 | AR@0.5 | Download |
-|:------:|:-----:|:-------:|:-------:|:------:|:------:|:------:|
-| [Baseline](configs/detection/cont-det3d_8xb1_embodiedscan-3d-284class-9dof.py) | RGB-D | 17.83 | 47.53 | 9.04 | 23.04 | [Model](https://download.openmmlab.com/mim-example/embodiedscan/cont-3ddet.pth), [Log](https://download.openmmlab.com/mim-example/embodiedscan/cont-3ddet.log) |
+# Update the evaluator with the model's output
+my_evaluator.update(model_output)
-#### Multi-View 3D Visual Grounding
+# Start the evaluation process and retrieve metric results
+metric_dict = my_evaluator.start_evaluation()
-| Method |AP@0.25| AP@0.5| Download |
-|:------:|:-----:|:-------:|:------:|
-| [Baseline-Mini](configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py) | 33.59 | 14.40 | [Model](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding.pth), [Log](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding.log) |
-| [Baseline-Mini (w/ FCAF box coder)](configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_fcaf-coder.py) | - | - | - |
-| [Baseline-Full](configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof-full.py) | 36.78 | 15.97 | [Model](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding-full.pth), [Log](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding-full.log) |
+# Optional: Retrieve detailed sample-level results
+print(my_evaluator.records)
Note: As mentioned in the paper, due to much more instances annotated with our new tools and pipelines, we concatenate several simple prompts as more complex ones to ensure those prompts to be more accurate without potential ambiguity. The above table is the benchmark without complex prompts using the initial version of visual grounding data.
+# Optional: Show the table of results
+print(my_evaluator.print_result())
We found such data is much less than the main part though, it can boost the multi-modal model's performance a lot. Meanwhile, whether to include these data in the validation set is not much important. We provide the updated benchmark as below and update a version of visual grounding data via emails to the community.
+# Important: Reset the evaluator after use +my_evaluator.reset() +``` -| Method | train | val | AP@0.25| AP@0.5| Download | -|:------:|:-----:|:---:|:------:|:-----:|:--------:| -| [Baseline-Full](configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof-full.py) | w/o complex | w/o complex | 36.78 | 15.97 | [Model](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding-full.pth), [Log](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding-full.log) | -| [Baseline-Full](configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof-full.py) | w/ complex | w/o complex | 39.26 | 18.86 |[Model](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding-complex.pth), [Log](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding-complex.log) | -| [Baseline-Full](configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof-full.py) | w/ complex | w/ complex | 39.21 | 18.84 |[Model](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding-complex.pth), [Log](https://download.openmmlab.com/mim-example/embodiedscan/mv-grounding-complex.log) | +The evaluator expects input data in a specific format, structured as follows: + +```python +[ + { + "pred_scores" (tensor/ndarray): Confidence scores for each prediction. Shape: (num_pred, 1) + + "pred_bboxes"/"gt_bboxes" (tensor/ndarray): List of 9 DoF bounding boxes. + Supports two input formats: + 1. 9-dof box format: (num_pred/gt, 9) + 2. center, size and rotation matrix: + "center": (num_pred/gt, 3), + "size" : (num_pred/gt, 3), + "rot" : (num_pred/gt, 3, 3) + + "subclass": The subclass of each VG sample. + "index": Index of the sample. + } + ... +] +``` -#### Multi-View Occupancy Prediction +#### 2. Question Answering Evaluator -| Method | Input | mIoU | Download | -|:------:|:-----:|:----:|:--------:| -| [Baseline](configs/occupancy/mv-occ_8xb1_embodiedscan-occ-80class.py) | RGB-D | 21.28 | [Log](https://download.openmmlab.com/mim-example/embodiedscan/mv-occ.log) | +The question answering evaluator measures performance using several established metrics: -#### Continuous Occupancy Prediction +- **Bleu-X**: Evaluates n-gram overlap between prediction and ground truths. +- **Meteor**: Focuses on precision, recall, and synonymy. +- **CIDEr**: Considers consensus-based agreement. +- **SPICE**: Used for semantic propositional content. +- **SimCSE/SBERT**: Semantic similarity measures using sentence embeddings. +- **EM (Exact Match) and Refine EM**: Compare exact matches between predictions and ground truths. -| Method | Input | mIoU | Download | -|:------:|:-----:|:----:|:--------:| -| [Baseline](configs/occupancy/cont-occ_8xb1_embodiedscan-occ-80class.py) | RGB-D | 22.92 | [Log](https://download.openmmlab.com/mim-example/embodiedscan/cont-occ.log) | +```python +# Initialize evaluator with pre-trained weights for SIMCSE and SBERT +my_evaluator = MMScan_QA_evaluator(model_config={}, show_results=True) -Because the occupancy prediction models are a little large, we save them via OpenXLab and do not provide direct download links here. 
To download these checkpoints on OpenXLab, please run the following commands: +# Update evaluator with model output +my_evaluator.update(model_output) -```bash -# If you did not install LFS before -git lfs install -# git clone EmbodiedScan model repo via -git clone https://code.openxlab.org.cn/wangtai/EmbodiedScan.git -# Then you can cd EmbodiedScan to get all the pretrained models -``` +# Start evaluation and obtain metrics +metric_dict = my_evaluator.start_evaluation() -Please see the [paper](./assets/EmbodiedScan.pdf) for more details of our benchmarks. This dataset is still scaling up and the benchmark is being polished and extended. Please stay tuned for our recent updates. +# Optional: View detailed sample-level results +print(my_evaluator.records) -## 📝 TODO List +# Important: Reset evaluator after completion +my_evaluator.reset() +``` -- \[x\] Release the paper and partial codes for datasets. -- \[x\] Release EmbodiedScan annotation files. -- \[x\] Release partial codes for models and evaluation. -- \[ \] Polish dataset APIs and related codes. -- \[x\] Release Embodied Perceptron pretrained models. -- \[x\] Release multi-modal datasets and codes. -- \[x\] Release codes for our baselines and benchmarks. -- \[ \] Release codes for all the other methods. -- \[ \] Full release and further updates. -- \[ \] Release MMScan data and codes. - -## 🔗 Citation - -If you find our work helpful, please cite: - -```bibtex -@inproceedings{embodiedscan, - title={EmbodiedScan: A Holistic Multi-Modal 3D Perception Suite Towards Embodied AI}, - author={Wang, Tai and Mao, Xiaohan and Zhu, Chenming and Xu, Runsen and Lyu, Ruiyuan and Li, Peisen and Chen, Xiao and Zhang, Wenwei and Chen, Kai and Xue, Tianfan and Liu, Xihui and Lu, Cewu and Lin, Dahua and Pang, Jiangmiao}, - year={2024}, - booktitle={IEEE Conference on Computer Vision and Pattern Recognition (CVPR)}, -} -@inproceedings{mmscan, - title={MMScan: A Multi-Modal 3D Scene Dataset with Hierarchical Grounded Language Annotations}, - author={Lyu, Ruiyuan and Wang, Tai and Lin, Jingli and Yang, Shuai and Mao, Xiaohan and Chen, Yilun and Xu, Runsen and Huang, Haifeng and Zhu, Chenming and Lin, Dahua and Pang, Jiangmiao}, - year={2024}, - booktitle={arXiv}, -} +The evaluator requires input data structured as follows: + +```python +[ + { + "question" (str): The question text, + "pred" (list[str]): The predicted answer, single element list, + "gt" (list[str]): Ground truth answers, containing multiple elements, + "ID": Unique ID for each QA sample, + "index": Index of the sample, + } + ... +] ``` -If you use our dataset and benchmark, please kindly cite the original datasets involved in our work. BibTex entries are provided below. +#### 3. GPT Evaluator -
Dataset BibTex
+In addition to classical QA metrics, the GPT evaluator offers a more advanced evaluation process.

-```BibTex
-@inproceedings{dai2017scannet,
-  title={ScanNet: Richly-annotated 3D Reconstructions of Indoor Scenes},
-  author={Dai, Angela and Chang, Angel X. and Savva, Manolis and Halber, Maciej and Funkhouser, Thomas and Nie{\ss}ner, Matthias},
-  booktitle = {Proceedings IEEE Computer Vision and Pattern Recognition (CVPR)},
-  year = {2017}
-}
-```
+```python
+# Initialize the GPT evaluator with an API key for access
+my_evaluator = MMScan_GPT_evaluator(API_key='XXX')
-```BibTex
-@inproceedings{Wald2019RIO,
-  title={RIO: 3D Object Instance Re-Localization in Changing Indoor Environments},
-  author={Johanna Wald, Armen Avetisyan, Nassir Navab, Federico Tombari, Matthias Niessner},
-  booktitle={Proceedings IEEE International Conference on Computer Vision (ICCV)},
-  year = {2019}
-}
-```
+# Load, evaluate with multiprocessing, and store results in a temporary path
+metric_dict = my_evaluator.load_and_eval(model_output, num_threads=5, tmp_path='XXX')
-```BibTex
-@article{Matterport3D,
-  title={{Matterport3D}: Learning from {RGB-D} Data in Indoor Environments},
-  author={Chang, Angel and Dai, Angela and Funkhouser, Thomas and Halber, Maciej and Niessner, Matthias and Savva, Manolis and Song, Shuran and Zeng, Andy and Zhang, Yinda},
-  journal={International Conference on 3D Vision (3DV)},
-  year={2017}
-}
+# Important: Reset the evaluator when finished
+my_evaluator.reset()
+```
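For concreteness, a single `model_output` entry for the GPT evaluator might look like the following sketch; the field values are purely illustrative, and the expected schema is restated below.

```python
model_output = [
    {
        'question': 'What is the chair near the window used for?',  # illustrative
        'pred': ['It is used for sitting while reading.'],          # single-element list
        'gt': ['Sitting.', 'People sit on it to read.'],            # multiple references
        'ID': 'qa_000000',                                          # illustrative ID
        'index': 0,
    },
]
```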
+The input structure remains the same as for the question answering evaluator: + +```python +[ + { + "question" (str): The question text, + "pred" (list[str]): The predicted answer, single element list, + "gt" (list[str]): Ground truth answers, containing multiple elements, + "ID": Unique ID for each QA sample, + "index": Index of the sample, + } + ... +] +``` -## 📄 License +### Models -Creative Commons License -
-This work is under the Creative Commons Attribution-NonCommercial-ShareAlike 4.0 International License. +We have adapted the MMScan API for some [models](./models/README.md). -## 👏 Acknowledgements +## 📝 TODO List -- [OpenMMLab](https://github.com/open-mmlab): Our dataset code uses [MMEngine](https://github.com/open-mmlab/mmengine) and our model is built upon [MMDetection3D](https://github.com/open-mmlab/mmdetection3d). -- [PyTorch3D](https://github.com/facebookresearch/pytorch3d): We use some functions supported in PyTorch3D for efficient computations on fundamental 3D data structures. -- [ScanNet](https://github.com/ScanNet/ScanNet), [3RScan](https://github.com/WaldJohannaU/3RScan), [Matterport3D](https://github.com/niessner/Matterport): Our dataset uses the raw data from these datasets. -- [ReferIt3D](https://github.com/referit3d/referit3d): We refer to the SR3D's approach to obtaining the language prompt annotations. -- [SUSTechPOINTS](https://github.com/naurril/SUSTechPOINTS): Our annotation tool is developed based on the open-source framework used by SUSTechPOINTS. +- \[ \] More Visual Grounding baselines and Question Answering baselines. +- \[ \] Full release and further updates. diff --git a/assets/2024_NeurIPS_MMScan_Camera_Ready.pdf b/assets/2024_NeurIPS_MMScan_Camera_Ready.pdf new file mode 100644 index 0000000..ff3ac2c Binary files /dev/null and b/assets/2024_NeurIPS_MMScan_Camera_Ready.pdf differ diff --git a/assets/EmbodiedScan.pdf b/assets/EmbodiedScan.pdf deleted file mode 100644 index fa2f05c..0000000 Binary files a/assets/EmbodiedScan.pdf and /dev/null differ diff --git a/assets/MMScan_teaser.png b/assets/MMScan_teaser.png new file mode 100644 index 0000000..7486427 Binary files /dev/null and b/assets/MMScan_teaser.png differ diff --git a/assets/demo.png b/assets/demo.png new file mode 100644 index 0000000..8e9297d Binary files /dev/null and b/assets/demo.png differ diff --git a/assets/demo_fig.png b/assets/demo_fig.png deleted file mode 100644 index 867ea5b..0000000 Binary files a/assets/demo_fig.png and /dev/null differ diff --git a/assets/framework.png b/assets/framework.png deleted file mode 100644 index 2cad768..0000000 Binary files a/assets/framework.png and /dev/null differ diff --git a/assets/teaser.png b/assets/teaser.png deleted file mode 100644 index 79c5f16..0000000 Binary files a/assets/teaser.png and /dev/null differ diff --git a/data/README.md b/data/README.md deleted file mode 100644 index 35a0bfa..0000000 --- a/data/README.md +++ /dev/null @@ -1,101 +0,0 @@ -### Prepare EmbodiedScan Data - -Given the licenses of respective raw datasets, we recommend users download the raw data from their official websites and then organize them following the below guide. -Detailed steps are shown as follows. - -1. Download ScanNet v2 data [HERE](https://github.com/ScanNet/ScanNet). Link or move the folder to this level of directory. - -2. Download 3RScan data [HERE](https://github.com/WaldJohannaU/3RScan). Link or move the folder to this level of directory. - -3. Download Matterport3D data [HERE](https://github.com/niessner/Matterport). Link or move the folder to this level of directory. - -4. Download ARKitScenes data [HERE](https://github.com/apple/ARKitScenes). Link or move the folder to this level of directory. - -5. Download EmbodiedScan data and extract it here. Currently, please fill in the [form](https://docs.google.com/forms/d/e/1FAIpQLScUXEDTksGiqHZp31j7Zp7zlCNV7p_08uViwP_Nbzfn3g6hhw/viewform?usp=sf_link), and we will reply with the data download link. 
-
-The directory structure should be as below.
-
-```
-data
-├── scannet
-│   ├── scans
-│   │   ├──
-│   │   ├── ...
-├── 3rscan
-│   ├──
-│   ├── ...
-├── matterport3d
-│   ├──
-│   ├── ...
-├── arkitscenes
-│   ├── Training
-│   | ├──
-│   | ├── ...
-│   ├── Validation
-│   | ├──
-│   | ├── ...
-├── embodiedscan_occupancy
-├── embodiedscan_infos_train.pkl
-├── embodiedscan_infos_val.pkl
-├── embodiedscan_infos_test.pkl
-├── embodiedscan_train_vg.json
-├── embodiedscan_val_vg.json
-├── embodiedscan_test_vg.json
-├── embodiedscan_train_mini_vg.json (mini set)
-├── embodiedscan_val_mini_vg.json (mini set)
-├── embodiedscan_train_vg_all.json (w/ complex prompts)
-├── embodiedscan_val_vg_all.json (w/ complex prompts)
-```
-
-5. Enter the project root directory, extract images by running
-
-```bash
-python embodiedscan/converter/generate_image_scannet.py --dataset_folder data/scannet/
-# generate_image_scannet.py can be very slow because it extracts images from .sens files. Add --fast to generate only images used by embodiedscan.
-python embodiedscan/converter/generate_image_3rscan.py --dataset_folder data/3rscan/
-```
-
-The directory structure should be as below after that
-
-```
-data
-├── scannet
-│   ├── scans
-│   │   ├──
-│   │   ├── ...
-│   ├── posed_images
-│   │   ├──
-│   │   | ├── *.jpg
-│   │   | ├── *.png
-│   │   ├── ...
-├── 3rscan
-│   ├──
-│   │   ├── sequence
-│   │   | ├── *.color.jpg
-│   │   | ├── *.depth.pgm
-│   ├── ...
-├── matterport3d
-│   ├──
-│   ├── ...
-├── arkitscenes
-│   ├── Training
-│   | ├──
-│   | ├── ...
-│   ├── Validation
-│   | ├──
-│   | ├── ...
-├── embodiedscan_occupancy
-├── embodiedscan_infos_train.pkl
-├── embodiedscan_infos_val.pkl
-├── embodiedscan_infos_test.pkl
-├── embodiedscan_train_vg.json
-├── embodiedscan_val_vg.json
-├── embodiedscan_train_mini_vg.json
-├── embodiedscan_val_mini_vg.json
-```
-
-6. Also extract EmbodiedScan occupancy annotations here by running
-
-```bash
-python embodiedscan/converter/extract_occupancy_ann.py --src data/embodiedscan_occupancy --dst data
-```
diff --git a/data_preparation/README.md b/data_preparation/README.md
new file mode 100644
index 0000000..4273267
--- /dev/null
+++ b/data_preparation/README.md
@@ -0,0 +1,51 @@
+### Prepare MMScan info files
+
+Given the licenses of respective raw datasets, we recommend users download the raw data from their official websites and then organize them following the below guide.
+Detailed steps are shown as follows.
+
+1. Download ScanNet v2 data [HERE](https://github.com/ScanNet/ScanNet). Link or move the folder to this level of directory.
+
+2. Download 3RScan data [HERE](https://github.com/WaldJohannaU/3RScan). Link or move the folder to this level of directory.
+
+3. Download Matterport3D data [HERE](https://github.com/niessner/Matterport). Link or move the folder to this level of directory.
+
+4. Organize the file structure. You are recommended to create a soft link to the raw data folder under `mmscan_data/embodiedscan_split/data`, as shown in the sketch after this step.
+
+   ```
+   mmscan_data/embodiedscan_split/data/
+   ├── scannet/
+   │   ├── scans
+   │   │   ├──
+   │   │   ├── ...
+   ├── 3rscan/
+   │   ├──
+   │   ├── ...
+   ├── matterport3d/
+   │   ├──
+   │   ├── ...
+   ```
+
+   Additionally, create a `process_pcd` folder under `mmscan_data/embodiedscan_split` to store the results. Similarly, we recommend using a symbolic link, as the total file size might be a little large (approximately 21 GB).
+
+   PS: If you have followed the EmbodiedScan tutorial to organize the data, you can skip these steps and link or copy the `data` folder to `mmscan_data/embodiedscan_split`.
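For reference, the linking described in step 4 can be set up roughly as follows. This is a sketch: the `/path/to/...` sources are placeholders for wherever your raw downloads and spare disk space actually live.

```bash
# Link the raw datasets into the expected layout.
mkdir -p mmscan_data/embodiedscan_split/data
ln -s /path/to/scannet mmscan_data/embodiedscan_split/data/scannet
ln -s /path/to/3rscan mmscan_data/embodiedscan_split/data/3rscan
ln -s /path/to/matterport3d mmscan_data/embodiedscan_split/data/matterport3d

# Keep the ~21 GB of processed point clouds on a larger disk via a symlink.
mkdir -p /path/to/big_disk/process_pcd
ln -s /path/to/big_disk/process_pcd mmscan_data/embodiedscan_split/process_pcd
```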
+ + After all the raw data is organized, the directory structure should be as below: + + ``` + mmscan_data + ├── embodiedscan_split/ + │ ├── data/ + │ ├── process_pcd/ + │ ├── embodiedscan-v1/ + │ ├── embodiedscan-v2/ + ├── MMScan-beta-release + + ``` + +5. Read raw files and generate processed point cloud files, by running the following scripts. + + ```bash + python process_all_scan.py --nproc 8 + # If your various file directories do not match the configuration settings, define them using -- + ``` diff --git a/data_preparation/meta_data/3rscan_matrix.npy b/data_preparation/meta_data/3rscan_matrix.npy new file mode 100644 index 0000000..ca6e3e8 Binary files /dev/null and b/data_preparation/meta_data/3rscan_matrix.npy differ diff --git a/data_preparation/meta_data/all_scan.json b/data_preparation/meta_data/all_scan.json new file mode 100644 index 0000000..dd2838d --- /dev/null +++ b/data_preparation/meta_data/all_scan.json @@ -0,0 +1 @@ +["scene0146_02", "1mp3d_0064_region3", "scene0329_00", "scene0468_02", "751a5590-fe61-2c3b-8dd8-414f615e5a97", "1mp3d_0069_region48", "1mp3d_0018_region2", "scene0478_01", "bf9a3dcd-45a5-2e80-8071-e4eeb6596460", "1mp3d_0065_region14", "5341b7e9-8a66-2cdd-86a9-62cf7da55c0a", "1mp3d_0075_region1", "scene0000_00", "c7895f5d-339c-2d13-8003-0763dfb1b23c", "scene0309_00", "scene0501_01", "1mp3d_0033_region22", "scene0468_00", "b05fdd8a-fca0-2d4f-8ac5-f6ae697787f5", "b05fdda6-fca0-2d4f-8973-9f8a5cdac3ad", "scene0675_00", "scene0673_02", "scene0448_02", "scene0224_00", "scene0044_01", "scene0344_00", "scene0160_00", "scene0387_00", "scene0146_00", "scene0342_00", "scene0016_01", "scene0042_01", "scene0494_00", "scene0458_01", "scene0098_01", "scene0112_02", "scene0160_02", "scene0293_00", "scene0451_00", "scene0611_01", "scene0250_02", "scene0362_02", "scene0398_00", "scene0423_02", "scene0655_02", "scene0320_03", "scene0511_00", "scene0124_01", "scene0183_00", "scene0114_00", "scene0508_00", "scene0159_00", "scene0195_01", "scene0270_00", "scene0703_01", "scene0686_01", "scene0362_00", "scene0543_00", "scene0036_01", "scene0000_02", "scene0655_00", "scene0166_00", "scene0310_00", "scene0316_00", "scene0517_00", "scene0088_00", "scene0039_00", "scene0006_00", "scene0295_00", "scene0074_02", "scene0185_00", "scene0054_00", "scene0618_00", "scene0169_01", "scene0132_02", "scene0029_01", "scene0596_01", "scene0553_01", "scene0329_02", "scene0114_02", "scene0457_02", "scene0397_01", "scene0087_01", "scene0575_01", "scene0508_02", "scene0573_01", "scene0448_00", "scene0212_01", "scene0019_00", "scene0387_02", "scene0122_01", "scene0381_00", "scene0415_01", "scene0663_01", "scene0563_00", "scene0425_00", "scene0423_00", "scene0580_00", "scene0381_02", "scene0628_01", "scene0599_00", "scene0451_02", "scene0300_01", "scene0589_01", "scene0134_00", "scene0492_00", "scene0388_01", "scene0249_00", "scene0471_00", "scene0352_01", "scene0260_01", "scene0537_00", "scene0062_01", "scene0091_00", "scene0545_00", "scene0222_00", "scene0627_00", "scene0052_00", "scene0680_01", "scene0276_00", "scene0052_02", "scene0608_01", "scene0696_00", "scene0232_01", "scene0638_00", "scene0435_03", "scene0477_00", "scene0088_02", "scene0009_01", "scene0471_02", "scene0364_00", "scene0269_00", "scene0599_02", "scene0665_01", "scene0586_00", "scene0193_01", "scene0690_00", "scene0214_01", "scene0405_00", "scene0266_01", "scene0543_02", "scene0601_00", "scene0528_00", "scene0435_01", "scene0150_01", "scene0250_00", "scene0072_02", "scene0645_01", "scene0336_00", "scene0030_01", "scene0517_02", 
"scene0256_02", "scene0320_01", "scene0269_02", "scene0097_00", "scene0590_01", "scene0631_01", "scene0279_01", "scene0447_01", "scene0482_01", "scene0006_02", "scene0705_01", "scene0621_00", "scene0565_00", "scene0310_02", "scene0074_00", "scene0673_00", "scene0102_01", "scene0010_01", "scene0630_05", "scene0064_01", "scene0072_00", "scene0166_02", "scene0259_01", "scene0140_00", "scene0545_02", "scene0270_02", "scene0112_00", "scene0179_00", "scene0081_01", "scene0204_02", "scene0689_00", "scene0132_00", "scene0457_00", "scene0586_02", "scene0484_01", "scene0204_00", "scene0020_00", "scene0607_00", "scene0696_02", "scene0403_00", "scene0653_00", "scene0026_00", "scene0306_01", "scene0170_01", "scene0531_00", "scene0256_00", "scene0134_02", "scene0330_00", "scene0202_00", "1mp3d_0002_region9", "1mp3d_0039_region49", "1mp3d_0039_region70", "1mp3d_0012_region38", "1mp3d_0000_region17", "1mp3d_0069_region30", "1mp3d_0060_region10", "1mp3d_0008_region12", "1mp3d_0023_region1", "1mp3d_0016_region3", "1mp3d_0066_region27", "1mp3d_0069_region32", "1mp3d_0040_region58", "1mp3d_0036_region10", "1mp3d_0032_region1", "1mp3d_0035_region23", "1mp3d_0062_region30", "1mp3d_0018_region0", "1mp3d_0003_region10", "1mp3d_0035_region15", "1mp3d_0036_region12", "1mp3d_0036_region24", "1mp3d_0045_region9", "1mp3d_0003_region24", "1mp3d_0013_region10", "1mp3d_0026_region9", "1mp3d_0067_region2", "1mp3d_0076_region0", "1mp3d_0005_region11", "1mp3d_0044_region42", "1mp3d_0040_region57", "1mp3d_0033_region16", "1mp3d_0069_region45", "1mp3d_0013_region4", "1mp3d_0069_region1", "1mp3d_0066_region13", "1mp3d_0048_region9", "1mp3d_0041_region33", "1mp3d_0036_region26", "1mp3d_0044_region40", "1mp3d_0045_region10", "1mp3d_0050_region14", "1mp3d_0013_region29", "1mp3d_0065_region16", "1mp3d_0001_region8", "1mp3d_0018_region10", "1mp3d_0017_region33", "1mp3d_0061_region9", "1mp3d_0043_region25", "1mp3d_0004_region0", "1mp3d_0005_region25", "1mp3d_0073_region5", "1mp3d_0082_region3", "1mp3d_0073_region8", "1mp3d_0065_region22", "1mp3d_0026_region4", "1mp3d_0010_region7", "1mp3d_0038_region19", "1mp3d_0035_region21", "1mp3d_0026_region10", "1mp3d_0007_region1", "1mp3d_0039_region72", "1mp3d_0043_region11", "1mp3d_0040_region20", "1mp3d_0073_region15", "1mp3d_0084_region7", "1mp3d_0081_region0", "1mp3d_0045_region12", "1mp3d_0060_region12", "1mp3d_0026_region24", "1mp3d_0016_region16", "1mp3d_0084_region5", "1mp3d_0029_region31", "1mp3d_0062_region7", "1mp3d_0044_region38", "1mp3d_0001_region47", "1mp3d_0069_region47", "1mp3d_0076_region11", "1mp3d_0084_region8", "1mp3d_0039_region5", "1mp3d_0059_region4", "1mp3d_0043_region13", "1mp3d_0078_region1", "1mp3d_0018_region12", "1mp3d_0046_region8", "1mp3d_0039_region44", "1mp3d_0047_region30", "1mp3d_0043_region28", "1mp3d_0026_region6", "1mp3d_0001_region48", "1mp3d_0038_region14", "1mp3d_0013_region26", "1mp3d_0045_region6", "1mp3d_0010_region17", "1mp3d_0052_region2", "1mp3d_0064_region1", "1mp3d_0001_region45", "1mp3d_0048_region11", "1mp3d_0012_region37", "1mp3d_0050_region16", "1mp3d_0001_region7", "1mp3d_0013_region9", "1mp3d_0013_region24", "1mp3d_0001_region32", "1mp3d_0040_region63", "1mp3d_0038_region22", "1mp3d_0033_region20", "1mp3d_0046_region7", "1mp3d_0025_region7", "1mp3d_0001_region30", "1mp3d_0040_region55", "1mp3d_0044_region37", "1mp3d_0004_region2", "1mp3d_0005_region28", "1mp3d_0050_region20", "1mp3d_0057_region7", "1mp3d_0006_region14", "1mp3d_0066_region11", "1mp3d_0035_region18", "1mp3d_0065_region19", "1mp3d_0010_region5", "1mp3d_0010_region15", 
"1mp3d_0016_region1", "1mp3d_0043_region0", "1mp3d_0057_region5", "1mp3d_0039_region46", "1mp3d_0025_region8", "1mp3d_0059_region6", "1mp3d_0002_region4", "1mp3d_0061_region6", "1mp3d_0040_region3", "1mp3d_0039_region33", "1mp3d_0045_region4", "1mp3d_0039_region31", "1mp3d_0013_region6", "1mp3d_0035_region17", "1mp3d_0073_region18", "1mp3d_0030_region11", "1mp3d_0065_region20", "1mp3d_0007_region3", "1mp3d_0033_region19", "1mp3d_0001_region5", "1mp3d_0069_region3", "1mp3d_0062_region8", "1mp3d_0076_region2", "1mp3d_0073_region7", "1mp3d_0025_region15", "1mp3d_0066_region25", "1mp3d_0038_region20", "1mp3d_0005_region27", "1mp3d_0040_region19", "1mp3d_0008_region10", "1mp3d_0078_region3", "1mp3d_0013_region12", "1mp3d_0081_region2", "1mp3d_0059_region9", "1mp3d_0040_region61", "1mp3d_0075_region3", "1mp3d_0057_region8", "1mp3d_0040_region22", "1mp3d_0026_region12", "1mp3d_0030_region13", "1mp3d_0067_region0", "1mp3d_0040_region1", "1mp3d_0046_region5", "1mp3d_0076_region13", "1mp3d_0048_region13", "1mp3d_0073_region17", "1mp3d_0003_region26", "1mp3d_0009_region0", "1mp3d_0043_region27", "1mp3d_0048_region4", "1mp3d_0052_region0", "1mp3d_0082_region1", "1mp3d_0006_region19", "1mp3d_0000_region15", "1mp3d_0002_region6", "1mp3d_0066_region28", "1mp3d_0061_region4", "1mp3d_0050_region19", "1mp3d_0023_region16", "1mp3d_0040_region14", "1mp3d_0006_region16", "1mp3d_0044_region35", "1mp3d_0040_region16", "1mp3d_0032_region3", "1mp3d_0038_region16", "1mp3d_0012_region40", "1mp3d_0009_region2", "1mp3d_0017_region31", "1mp3d_0048_region6", "1mp3d_0039_region8", "1mp3d_0025_region5", "1mp3d_0043_region2", "1mp3d_0012_region42", "1mp3d_0010_region8", "1mp3d_0012_region35", "1mp3d_0039_region7", "1mp3d_0003_region12", "1mp3d_0023_region14", "1mp3d_0029_region33", "1mp3d_0022_region31", "1mp3d_0023_region3", "1mp3d_0026_region26", "1mp3d_0016_region14", "1mp3d_0026_region29", "1mp3d_0062_region5", "1mp3d_0033_region14", "9c27de4d-6184-2cda-80c6-174eddb07154", "a090600e-66f7-2272-9cfe-63b80d55da17", "ab835f7d-54c6-29a1-9aae-bf97735dd235", "bf9a3da0-45a5-2e80-8099-e46f3ec15e14", "75c2599b-9ca2-2844-9683-a503346d840a", "569d8f0f-72aa-2f24-89a6-77f8b8779ae9", "1c211544-f201-2d25-85bb-7569eac370e9", "77361fc4-d054-2a22-899f-58d6462c9d6a", "5341b790-8a66-2cdd-8472-90e738e30995", "c12890da-d3df-2d0d-862f-db6f9df19711", "c7895f27-339c-2d13-836b-c12dca280261", "6bde60af-9162-246f-8f0e-7b3e8cb00aaf", "0958220d-e2c2-2de1-9710-c37018da1883", "c7895f63-339c-2d13-81a3-0b07b1eb23b4", "d7d40d42-7a5d-2b36-9602-71e348ded22e", "b05fdd66-fca0-2d4f-88bb-210da4439475", "a8952591-9035-254b-8f0a-d23a65253b5f", "751a557f-fe61-2c3b-8f60-a1ba913060c4", "752cc5a5-920c-26f5-8ef1-aac2937ab135", "55551077-36f1-29c0-89ec-2e7690991cb2", "569d8f18-72aa-2f24-8bb2-fb8211412783", "0cac75de-8d6f-2d13-8e1a-b574569c3885", "8eabc449-5af7-2f32-8669-f776ccecc6fa", "13124cad-cec3-27a6-857f-914a832a87c3", "6bde60bc-9162-246f-8e50-ca5054c79322", "77361fd2-d054-2a22-88bd-8b14f5969890", "c7895f21-339c-2d13-8376-d703f09e7b3b", "4e858c9b-fd93-2cb4-851f-b65dcef6e6e1", "6bde609b-9162-246f-8f90-c3d2444a5ab8", "c92fb578-f771-2064-85fc-485dbfba73df", "c7895f33-339c-2d13-83d1-720cfdc0c4b1", "c92fb5a0-f771-2064-8631-ec197369e100", "5341b7c7-8a66-2cdd-86f4-c9dadf37c419", "352e9c40-69fb-27a7-8a6d-d2ca32644e09", "8eabc43a-5af7-2f32-85d7-b62167a48e58", "751a558e-fe61-2c3b-8d62-3b2ddfeb7758", "1776ad7e-4db7-2333-89e1-66854e82170c", "0d7054f5-4c6e-2ab6-844f-c1328fe572bf", "9766cbe5-6321-2e2f-8040-4e5b7a5d8ba1", "87e6cf6f-9d1a-289f-8693-db8b73a4c4f4", 
"ee527b51-0df9-2dae-829e-a0543a6e4074", "5341b797-8a66-2cdd-8456-9d0e8ec333ec", "c7895f1f-339c-2d13-8172-b32de97f8cce", "6bde60b7-9162-246f-8e93-2738e71ccafd", "1d23400a-e280-2b1a-8e83-f7344ba74475", "1776ad76-4db7-2333-8ab2-6de6191f3d11", "8f0f1469-55de-28ce-81e1-3777a48d711e", "4fbad314-465b-2a5d-8445-9d021f278c1e", "fcf66da8-622d-291c-8565-c44cf20e39b9", "787ed58c-9d98-2c97-83b9-b48a609ace15", "48005c65-7d67-29ec-85e0-6a925eb15a27", "13124cc0-cec3-27a6-84bd-1f56cb6d11a8", "210cdba9-9e8d-2832-862b-b37293fcc0e1", "6bde6063-9162-246f-8fea-cf3292226c38", "d7d40d77-7a5d-2b36-9576-25354ef37dd9", "38770ca5-86d7-27b8-871c-57fdbfe87905", "0cac764c-8d6f-2d13-8fec-15edfc3e53ad", "47319772-f9f7-2a1a-9768-108df4f37ab9", "0cac75a1-8d6f-2d13-8dab-0ecc3b317dc7", "0ad2d391-79e2-2212-98a3-7e2440884395", "fcf66d98-622d-291c-851a-a16c117a91bb", "1abf0914-a3da-2be8-8014-c0e3f94d64f0", "82def5da-25ba-20f9-8810-71be905b83db", "0cac7662-8d6f-2d13-8e2c-189aab269bae", "185d741d-3698-223c-8837-9dafd682c7f0", "5555108b-36f1-29c0-8bdd-1e94534bc134", "ba6fdaa8-a4c1-2dca-814b-4dd6cf0ed226", "ad408c9f-84db-2095-8acf-bdfbe88fbf78", "6bde6072-9162-246f-8fa1-71ec9bc5ebeb", "b1d87fac-e72e-2c8c-9ef9-82e3277c3c35", "6bde604b-9162-246f-8fb2-2dea80e7fb4c", "b1d87fae-e72e-2c8c-9cf3-c927fdb7c685", "43b8cae7-6678-2e38-9986-dc6f18ac7bb0", "77361fd0-d054-2a22-8b5a-9d2acada2031", "fcf66da6-622d-291c-8685-46cd96a8af4e", "ba6fdab4-a4c1-2dca-83e2-f5878fdf688a", "1c211554-f201-2d25-8680-513a91547c2f", "5104a9cd-adc4-2a85-91f5-8030e69ee955", "bcb0fe33-4f39-2c70-9cb1-081014d8a9b8", "6bde60e0-9162-246f-8df9-b07dc8fa8ddf", "8eabc407-5af7-2f32-86af-6e962fa7cebd", "422885dc-192d-25fc-857a-0c4af3695e4b", "b1d87fb6-e72e-2c8c-9eb0-c7a40f6f4877", "0cac75ee-8d6f-2d13-8f1f-5f13d3b59ce3", "10b1796b-3938-2467-88bc-c224c21bc91c", "1d234008-e280-2b1a-8cc9-c573097cd272", "baf67393-8e94-22f8-8198-961bc5ea7f20", "6bde60a9-9162-246f-8f1a-2441db12c4d1", "7747a502-9431-24e8-8456-5c6bc3e2eea3", "8eabc42c-5af7-2f32-87c4-bf646779aa62", "f62fd5fa-9a3f-2f44-8b04-9c754f6a5a8d", "cdcaf5bf-ddd8-2ed6-971c-d8e07f06e98f", "422885af-192d-25fc-8651-420062adb475", "77361fc2-d054-2a22-8ae5-c56022c5e974", "5630cfd8-12bf-2860-8773-e3dde9da2aff", "1d233ffa-e280-2b1a-8e92-2540f61e5501", "02b33dfb-be2b-2d54-92d2-cd012b2b3c40", "8eabc45f-5af7-2f32-8528-640861d2a135", "0cac760d-8d6f-2d13-8ea2-109ce4da9ac9", "0cac7532-8d6f-2d13-8cea-1e70d5ae4856", "ddc7379b-765b-241a-9f45-c37e41608726", "73315a31-185c-2c8a-8559-79c05e1e8b6f", "0958222f-e2c2-2de1-94fd-986561908cee", "501ebf0f-a3bb-263f-867d-7c2af21b54d0", "e83fe657-2171-26c8-8c64-199f0871f98d", "ad408cad-84db-2095-8be8-68c0f94d8d14", "0cac7574-8d6f-2d13-8db6-4304f437e6d5", "63b87cf1-ef3f-28f2-871a-c1551f129ce6", "74ef8470-9dce-2d66-8339-4b51b8406cef", "b8837e3f-57ec-29c6-89a2-b2417e091d50", "10b17951-3938-2467-881b-3bcb3b80bad3", "6bde604d-9162-246f-8d7d-631c1e992162", "43b8caed-6678-2e38-98f8-a76f51ef79af", "e48c48fb-8b2f-2858-890d-fb49b3db5573", "3b7b33b1-1b11-283e-9ac0-8843e072d7d5", "13124cc6-cec3-27a6-86dc-7f23b894c85c", "0cac75a7-8d6f-2d13-8fdc-083ff44d10fb", "b8837e2e-57ec-29c6-8896-de41de986b5d", "8f0f1461-55de-28ce-821c-d2eeaa217cc1", "aa202790-8bc8-2c5b-9d03-129b42efee05", "c92fb588-f771-2064-840a-17748e8c20ad", "ad408cb3-84db-2095-8930-d8244e349008", "b8837e1a-57ec-29c6-8a01-dec1dcb87460", "210cdbbd-9e8d-2832-87c1-a1a2c71cde80", "4e858c91-fd93-2cb4-85e4-02c198efe788", "f62fd5f4-9a3f-2f44-8b81-246b93170189", "bf9a3dcf-45a5-2e80-8196-c38271d91bcf", "634d11d5-6833-255d-8d0a-c0da83387d01", 
"d7d40d50-7a5d-2b36-9446-7d636174329f", "6bde606a-9162-246f-8d00-6f12f032e27d", "6a36052f-fa53-2915-9400-831b60c63077", "7ab2a9c5-ebc6-2056-89c7-920e98f0cf5a", "b8837e21-57ec-29c6-891b-6b94255462c3", "0cac7635-8d6f-2d13-8ea5-25f4dc197d1d", "355465d2-d29b-29f9-9460-4c597924ac63", "0cac75fc-8d6f-2d13-8c59-13d52e366f38", "6bde6043-9162-246f-8e11-613aba0df55c", "5341b79b-8a66-2cdd-84cd-d6dfbb614837", "8eabc424-5af7-2f32-8624-5cba04f952bc", "422885c3-192d-25fc-844a-645180810bfd", "0cac762d-8d6f-2d13-8c70-b523afdf6989", "b05fddd1-fca0-2d4f-8ab9-ecffab72e046", "9af05c68-5794-2e19-8c5a-979f448da545", "6bde60a1-9162-246f-8c51-a147225db6bd", "752cc597-920c-26f5-8c1b-a8a5c90a21d7", "f38169c7-378c-2a65-8543-3c7481e856fe", "50e7c0ad-0730-2a5f-8635-252404ee82e0", "09582219-e2c2-2de1-9534-519142703037", "0cac7540-8d6f-2d13-8eee-36ba2a428e3f", "55551095-36f1-29c0-8832-90e4ea43176b", "352e9c3e-69fb-27a7-8976-a6860f84f876", "8eabc428-5af7-2f32-8659-97a21d90a06b", "7747a510-9431-24e8-8705-907ee78be2a2", "d7d40d48-7a5d-2b36-97ad-692c9b56b508", "0cac75d0-8d6f-2d13-8c26-d771a31c3f50", "6bde606c-9162-246f-8dbf-b0334f524501", "10b1792a-3938-2467-8b4e-a93da27a0985", "b8837e2c-57ec-29c6-89f3-98eb20a959bb", "22312380-b828-2c09-92f8-061e80aafe34", "3544f968-0327-249b-909d-83e3fb0f7bfc", "ad408caf-84db-2095-88ac-c955accacd31", "4138582f-a238-2435-8332-6902542c2823", "4a9a43e8-7736-2874-8519-29915757c372", "352e9c48-69fb-27a7-8a35-3dbf699637c8", "0ad2d38f-79e2-2212-98d2-9b5060e5e9b5", "09582237-e2c2-2de1-9758-862dc7a3d009", "c6707941-2ecb-2de2-8236-13d2d850cf65", "10b1792c-3938-2467-8b57-bc0b18bc6b13", "c7895f6d-339c-2d13-8078-90c61bd6f685", "d7d40d35-7a5d-2b36-9739-6072a7a25907", "d63767be-3205-226c-9a89-3a27969053a7", "1mp3d_0062_region14", "scene0498_02", "1mp3d_0061_region11", "scene0121_02", "c12890e3-d3df-2d0d-87cf-a5510bc39c3a", "1mp3d_0042_region29", "1mp3d_0001_region57", "scene0534_01", "scene0706_00", "4acaebc0-6c10-2a2a-852e-0226d6539299", "scene0371_00", "fcf66d88-622d-291c-871f-699b2d063630", "1mp3d_0040_region47", "10b17963-3938-2467-8a48-0d4af350ce92", "1mp3d_0042_region26", "1mp3d_0060_region7", "scene0348_02", "scene0464_00", "scene0347_01", "scene0228_00", "scene0092_01", "scene0107_00", "scene0231_02", "scene0077_01", "scene0051_03", "scene0504_00", "scene0341_01", "scene0013_00", "scene0067_00", "scene0005_01", "scene0444_00", "scene0368_00", "scene0173_02", "scene0595_00", "scene0632_00", "scene0695_03", "scene0255_01", "scene0614_02", "scene0118_02", "scene0313_01", "scene0498_00", "scene0442_00", "scene0392_00", "scene0377_02", "scene0173_00", "scene0426_01", "scene0646_00", "scene0078_00", "scene0280_00", "scene0695_01", "scene0640_02", "scene0289_01", "scene0583_01", "scene0165_01", "scene0273_01", "scene0693_01", "scene0013_02", "scene0487_00", "scene0003_01", "scene0676_01", "scene0419_01", "scene0614_00", "scene0208_00", "scene0155_00", "scene0163_01", "scene0084_02", "scene0138_00", "scene0280_02", "scene0015_00", "scene0101_02", "scene0263_00", "scene0367_01", "scene0514_01", "scene0048_01", "scene0127_00", "scene0426_03", "scene0047_00", "scene0502_00", "scene0604_01", "scene0286_00", "scene0683_00", "scene0041_00", "scene0033_00", "scene0474_03", "scene0231_00", "scene0669_01", "scene0700_00", "scene0348_00", "scene0189_00", "scene0540_01", "scene0078_02", "scene0358_01", "scene0201_01", "scene0570_00", "scene0058_00", "scene0416_00", "scene0556_00", "scene0082_00", "scene0351_00", "scene0576_00", "scene0335_01", "scene0700_02", "scene0175_00", "scene0218_01", "scene0121_00", 
"scene0585_01", "scene0067_02", "scene0420_01", "scene0186_01", "scene0057_01", "scene0137_01", "scene0382_01", "scene0593_00", "scene0569_00", "scene0679_00", "scene0685_00", "scene0190_00", "scene0092_03", "scene0430_00", "scene0685_02", "scene0143_01", "scene0217_00", "scene0361_01", "scene0394_00", "scene0488_01", "scene0640_00", "scene0559_01", "scene0502_02", "scene0670_01", "scene0305_00", "scene0550_00", "scene0406_01", "scene0666_00", "scene0101_00", "scene0416_02", "scene0357_00", "scene0051_01", "scene0221_01", "scene0118_00", "scene0646_02", "scene0377_00", "scene0452_01", "scene0660_00", "scene0410_00", "scene0211_00", "scene0025_01", "scene0622_01", "scene0243_00", "scene0068_01", "scene0481_00", "scene0579_01", "scene0084_00", "scene0378_01", "scene0474_01", "scene0524_00", "scene0570_02", "scene0323_00", "scene0325_00", "scene0265_00", "scene0303_02", "scene0532_01", "scene0656_01", "scene0265_02", "scene0612_00", "scene0153_00", "scene0196_00", "scene0400_01", "scene0659_00", "scene0131_01", "scene0439_01", "scene0299_00", "scene0035_00", "scene0245_00", "scene0155_02", "scene0409_00", "scene0436_00", "scene0211_02", "scene0061_00", "scene0634_00", "scene0505_04", "scene0237_00", "scene0429_00", "scene0656_03", "scene0392_02", "scene0649_01", "scene0111_01", "scene0303_00", "scene0576_02", "scene0522_00", "scene0549_00", "scene0666_02", "scene0462_00", "scene0238_01", "scene0296_01", "scene0472_01", "scene0286_02", "scene0207_01", "1mp3d_0038_region30", "1mp3d_0065_region2", "1mp3d_0039_region54", "1mp3d_0040_region48", "1mp3d_0044_region11", "1mp3d_0001_region58", "1mp3d_0002_region13", "1mp3d_0060_region5", "1mp3d_0030_region3", "1mp3d_0047_region19", "1mp3d_0066_region3", "1mp3d_0060_region8", "1mp3d_0013_region39", "1mp3d_0057_region22", "1mp3d_0041_region21", "1mp3d_0041_region15", "1mp3d_0057_region14", "1mp3d_0030_region1", "1mp3d_0042_region24", "1mp3d_0062_region22", "1mp3d_0062_region19", "1mp3d_0027_region25", "1mp3d_0069_region14", "1mp3d_0041_region2", "1mp3d_0083_region0", "1mp3d_0065_region0", "1mp3d_0003_region7", "1mp3d_0029_region18", "1mp3d_0035_region6", "1mp3d_0041_region0", "1mp3d_0064_region21", "1mp3d_0017_region17", "1mp3d_0004_region24", "1mp3d_0039_region60", "1mp3d_0077_region12", "1mp3d_0014_region26", "1mp3d_0062_region20", "1mp3d_0001_region19", "1mp3d_0071_region7", "1mp3d_0017_region23", "1mp3d_0033_region0", "1mp3d_0067_region26", "1mp3d_0027_region27", "1mp3d_0017_region15", "1mp3d_0047_region14", "1mp3d_0017_region2", "1mp3d_0071_region5", "1mp3d_0038_region6", "1mp3d_0035_region44", "1mp3d_0012_region5", "1mp3d_0012_region28", "1mp3d_0039_region15", "1mp3d_0017_region0", "1mp3d_0066_region1", "1mp3d_0042_region10", "1mp3d_0027_region13", "1mp3d_0050_region2", "1mp3d_0008_region3", "1mp3d_0029_region9", "1mp3d_0009_region11", "1mp3d_0011_region14", "1mp3d_0027_region5", "1mp3d_0077_region3", "1mp3d_0042_region3", "1mp3d_0039_region17", "1mp3d_0006_region0", "1mp3d_0012_region25", "1mp3d_0019_region3", "1mp3d_0064_region18", "1mp3d_0024_region6", "1mp3d_0027_region8", "1mp3d_0005_region35", "1mp3d_0014_region10", "1mp3d_0039_region18", "1mp3d_0061_region25", "1mp3d_0042_region1", "1mp3d_0071_region13", "1mp3d_0032_region21", "1mp3d_0000_region9", "1mp3d_0002_region11", "1mp3d_0071_region11", "1mp3d_0064_region23", "1mp3d_0039_region62", "1mp3d_0027_region11", "1mp3d_0019_region1", "1mp3d_0035_region31", "1mp3d_0069_region22", "1mp3d_0012_region27", "1mp3d_0061_region13", "1mp3d_0038_region32", "1mp3d_0047_region6", "1mp3d_0079_region14", 
"1mp3d_0002_region27", "1mp3d_0038_region9", "1mp3d_0022_region21", "1mp3d_0001_region16", "1mp3d_0040_region30", "1mp3d_0079_region0", "1mp3d_0039_region23", "1mp3d_0009_region13", "1mp3d_0022_region2", "1mp3d_0077_region10", "1mp3d_0014_region3", "1mp3d_0013_region43", "1mp3d_0059_region12", "1mp3d_0057_region19", "1mp3d_0017_region21", "1mp3d_0012_region11", "1mp3d_0072_region9", "1mp3d_0013_region36", "1mp3d_0011_region4", "1mp3d_0064_region17", "1mp3d_0035_region4", "1mp3d_0044_region13", "1mp3d_0074_region0", "1mp3d_0033_region30", "1mp3d_0022_region15", "1mp3d_0032_region15", "1mp3d_0004_region10", "1mp3d_0029_region17", "1mp3d_0012_region13", "1mp3d_0022_region17", "1mp3d_0036_region7", "1mp3d_0039_region21", "1mp3d_0040_region45", "1mp3d_0022_region0", "1mp3d_0067_region12", "1mp3d_0039_region56", "1mp3d_0042_region12", "1mp3d_0036_region8", "1mp3d_0059_region10", "1mp3d_0067_region29", "1mp3d_0029_region21", "1mp3d_0005_region38", "1mp3d_0005_region37", "1mp3d_0001_region20", "1mp3d_0001_region55", "1mp3d_0012_region7", "1mp3d_0032_region18", "1mp3d_0005_region1", "1mp3d_0011_region20", "1mp3d_0027_region7", "1mp3d_0005_region40", "1mp3d_0041_region18", "1mp3d_0062_region16", "1mp3d_0014_region1", "1mp3d_0044_region28", "1mp3d_0085_region4", "1mp3d_0033_region32", "1mp3d_0011_region16", "1mp3d_0077_region1", "1mp3d_0035_region49", "1mp3d_0024_region4", "1mp3d_0041_region23", "1mp3d_0014_region12", "1mp3d_0001_region14", "1mp3d_0050_region0", "1mp3d_0017_region18", "1mp3d_0044_region25", "1mp3d_0079_region2", "1mp3d_0022_region18", "1mp3d_0072_region4", "1mp3d_0012_region8", "1mp3d_0029_region6", "1mp3d_0047_region20", "1mp3d_0011_region19", "1mp3d_0022_region23", "1mp3d_0038_region4", "1mp3d_0001_region22", "1mp3d_0003_region8", "1mp3d_0029_region4", "1mp3d_0057_region20", "1mp3d_0057_region16", "1mp3d_0019_region11", "1mp3d_0014_region29", "1mp3d_0013_region41", "1mp3d_0013_region34", "1mp3d_0011_region6", "1mp3d_0044_region27", "1mp3d_0019_region13", "1mp3d_0035_region9", "1mp3d_0035_region46", "1mp3d_0069_region19", "1mp3d_0033_region2", "1mp3d_0000_region6", "1mp3d_0047_region4", "1mp3d_0006_region2", "1mp3d_0047_region9", "1mp3d_0069_region16", "1mp3d_0005_region3", "1mp3d_0044_region8", "1mp3d_0067_region10", "1mp3d_0074_region2", "1mp3d_0083_region2", "1mp3d_0032_region17", "1mp3d_0044_region7", "1mp3d_0035_region33", "1mp3d_0004_region12", "1mp3d_0039_region59", "1mp3d_0002_region28", "1mp3d_0064_region15", "1mp3d_0041_region17", "1mp3d_0069_region20", "1mp3d_0067_region24", "1mp3d_0003_region5", "1mp3d_0047_region16", "1mp3d_0032_region23", "1mp3d_0071_region8", "1mp3d_0044_region5", "1mp3d_0036_region5", "1mp3d_0002_region25", "1mp3d_0005_region42", "1mp3d_0072_region6", "1mp3d_0000_region4", "1mp3d_0029_region23", "1mp3d_0047_region22", "1mp3d_0029_region15", "1mp3d_0040_region32", "1mp3d_0008_region1", "1mp3d_0085_region6", "1mp3d_0011_region9", "8f0f142a-55de-28ce-81f2-5d5c683204b8", "0cac7584-8d6f-2d13-8df8-c05e4307b418", "c7895f1d-339c-2d13-820f-f998ab956707", "baf6739f-8e94-22f8-82fd-b0cc4f4da8ae", "80b8588f-4a8d-222f-8712-eaa02a5450a9", "751a5584-fe61-2c3b-8eb1-cfbcd57469dc", "1d2f8516-d757-207c-8e08-5fef80bf34d8", "50e7c0b1-0730-2a5f-8471-b240fa51b1d7", "10a993b0-b1ad-2404-85b7-3eac1e66b413", "7ab2a9c1-ebc6-2056-896d-09dd5328b781", "d7d40d62-7a5d-2b36-955e-86a394caeabb", "95be45dd-a558-22da-9de4-002d61e13deb", "fa79392d-7766-2d5c-87c0-b444b76eb266", "752cc589-920c-26f5-8ffe-8166547078c1", "c7895f6f-339c-2d13-830a-81f71c3bfb78", 
"634d11d3-6833-255d-8cb0-12c4fb3ea031", "0cac7576-8d6f-2d13-8f3c-f01f8f14022c", "a0905ff8-66f7-2272-9f85-f2fd1396d445", "dcb6a329-5526-23f1-9d81-7718f682269c", "c2d99341-1947-2fbf-817a-5aa9b44f724f", "742e8f19-be0a-294e-9eb6-50dc474b110e", "c54c6eff-7124-2bf6-88ba-a46f4e2e76b1", "0958220b-e2c2-2de1-96bc-739f09c1e8f8", "352e9c57-69fb-27a7-8bc7-4cf12e417e21", "10b17969-3938-2467-8b12-9a43fea68e63", "0cac765e-8d6f-2d13-8d01-8f28e99c016c", "4e858c93-fd93-2cb4-86d6-12f652012a40", "bf9a3dc9-45a5-2e80-80cc-29579f2928e6", "e2847ff5-a506-2b60-8767-25c70c889de3", "20c993bf-698f-29c5-8549-a69fd169c1e1", "531cff04-0021-28f6-8d49-e51826f43468", "6a360525-fa53-2915-97f3-1be9b98cddb3", "751a5594-fe61-2c3b-8d40-d10f380079b3", "a644cb9b-0ee5-2f66-9ee4-52027f9ba7d5", "baf673a1-8e94-22f8-824b-1139a8fc8bda", "d7d40d56-7a5d-2b36-970b-9aad70998d20", "4acaebc8-6c10-2a2a-8525-fe9c4b7f4b25", "4138585b-a238-2435-810b-9728fc989b2f", "5555108f-36f1-29c0-8b75-21a0bb8b1833", "4d3d82bc-8cf4-2e04-8007-e2f7fe679737", "6a360521-fa53-2915-94f6-8c3b9d084ee7", "0cac75f8-8d6f-2d13-8f8a-30d9464cad6e", "7747a506-9431-24e8-87d9-37a5654d41f4", "4a9a43d8-7736-2874-86fc-098deb94c868", "4e858c83-fd93-2cb4-864f-f127e87e0783", "6a36052b-fa53-2915-94fe-f6c908e61c52", "f62fd5f8-9a3f-2f44-8b1e-1289a3a61e26", "10b17957-3938-2467-88a5-9e9254930dad", "170725d4-9f1b-2d87-812a-b126916caa9f", "09582225-e2c2-2de1-9564-f6681ef5e511", "1776ad80-4db7-2333-8b18-f02ef42f3569", "6ed38500-7db9-2d45-810c-865e82827b54", "0958224c-e2c2-2de1-948b-4417ac5f2711", "c2d9934b-1947-2fbf-8133-76cf48000d74", "13124cc8-cec3-27a6-87e6-11de5c4a40dd", "185d7425-3698-223c-8ac8-78643cbdc3d1", "7f30f36c-42f9-27ed-87c6-23ceb65f1f9b", "2e4a3960-d452-21a0-9f23-4b5f2548b05f", "4d3d82b6-8cf4-2e04-830a-4303fa0e79c7", "2451c046-fae8-24f6-91fb-c25185c51c7b", "752cc576-920c-26f5-8f6b-040d544069b2", "6bde60ad-9162-246f-8cc8-71bfdd742b43", "baf673a6-8e94-22f8-8047-575f7de04f21", "717ad1f3-b350-2996-8897-3c57a14e3d05", "c7895f4c-339c-2d13-8383-527f405b18c3", "6bde60c2-9162-246f-8e01-7b0b9b12cca4", "5ed77dd8-c4f5-27a0-85c7-c9cfc4feb6dd", "cdcaf5c1-ddd8-2ed6-9553-bcaa06ad43da", "73315a2d-185c-2c8a-87e9-d8dfe07ae3cb", "0cac75dc-8d6f-2d13-8d08-9c497bd6acdc", "42384916-60a7-271e-9c1f-6722abc6495d", "137a8156-1db5-2cc0-80ff-73ceef09cc7c", "210cdbb2-9e8d-2832-87e5-7e474cf621ea", "20c993c1-698f-29c5-86b8-50a2a0907e2b", "10b17940-3938-2467-8a7a-958300ba83d3", "bf9a3dc5-45a5-2e80-8068-91ac8398b4cd", "b8837e3c-57ec-29c6-881d-aa1c1c08679e", "6a360543-fa53-2915-95a7-697af24d1d25", "09582207-e2c2-2de1-972c-225d968c2ab4", "8e0f1c37-9e28-2339-85b0-6a08741718f7", "422885e9-192d-25fc-87a9-7013fe4114f2", "a0906006-66f7-2272-9e46-51000ffe6623", "baf673b0-8e94-22f8-8136-0b6b5028de14", "2451c053-fae8-24f6-91f0-6bebca1fe741", "a090600c-66f7-2272-9d26-5a98b70d30c5", "569d8f07-72aa-2f24-8b3d-47a4ceb4b966", "501ebf0d-a3bb-263f-8483-2f0bc7e44767", "6bde60cb-9162-246f-8cf5-d04f7426e56f", "0958224e-e2c2-2de1-943b-38e36345e2e7", "0cac75e8-8d6f-2d13-8fc4-acdbf00437c8", "321c8680-a5a8-2a84-85c2-816a26d59516", "2e369525-e133-204c-93ab-2b23ce5ef16e", "dbeb4cee-faf9-2324-991e-79c1f7e0b908", "531cff12-0021-28f6-8d9c-2001f6dbbe05", "dcb6a32b-5526-23f1-9cbc-3d257a8096fa", "0cac75cc-8d6f-2d13-8ddf-7b3c05e8709c", "0958223d-e2c2-2de1-9683-a76a4276d84c", "a644cb93-0ee5-2f66-9efb-b16adfb14eff", "1713f857-5300-2104-85c7-5e6e0b50713c", "6bde6091-9162-246f-8ea8-fdfd6c0a7f77", "8e0f1c24-9e28-2339-840c-7e135addd26b", "feefd90b-9d00-2ce5-8119-b936443cc39b", "321c867e-a5a8-2a84-851a-818df115be05", 
"a0905fec-66f7-2272-9d99-205da6e213fe", "0cac7640-8d6f-2d13-8d50-e374f145f2b6", "787ed590-9d98-2c97-80db-89baaa605049", "a7616234-a0aa-2dea-80e1-69c253cfa9b2", "d7d40d5c-7a5d-2b36-9433-ee98b893988d", "210cdbab-9e8d-2832-85fa-87d12badb00e", "6bde6099-9162-246f-8d20-43caaaf32911", "e83fe655-2171-26c8-8eb2-b89a89af4928", "ab835f8b-54c6-29a1-9a7a-4770a0eb74ff", "10b1792e-3938-2467-8bb3-172148ae5a67", "c92fb58e-f771-2064-85ec-a3446ebee692", "20c9939f-698f-29c5-86fa-653fe33ff6fc", "6bde607b-9162-246f-8e65-76e3ef265504", "6a360527-fa53-2915-9649-f5c6c7eeeb01", "c12890c7-d3df-2d0d-8541-a72f5bc6a668", "8eabc453-5af7-2f32-859d-405eb6a2e0d0", "4a9a43e6-7736-2874-8723-08ae98eccd16", "752cc578-920c-26f5-8d8d-8a4239658074", "8eabc45c-5af7-2f32-85b8-7cb0efc7372d", "20c993cb-698f-29c5-86fd-1a409a569219", "ddc737af-765b-241a-9ea5-0a02dc0876f2", "8e0f1c42-9e28-2339-861e-2d29caae3f9a", "2e4a3962-d452-21a0-9fd2-7a325c600a49", "13124cb4-cec3-27a6-848c-7599f34ef5a8", "1d2f850c-d757-207c-8fba-60b90a7d4691", "422885bd-192d-25fc-8571-abff2237f383", "ab835f9d-54c6-29a1-9aa1-f481b67b4a6d", "0cac766c-8d6f-2d13-8cde-44c997b5e8a7", "752cc57f-920c-26f5-8e8d-1f9d567cffd7", "6a36054d-fa53-2915-94a6-7e44f4f7a6cf", "c92fb594-f771-2064-879b-cc598a9dabe5", "6bde60a7-9162-246f-8c6b-2f431eb7cb26", "b1d87fb0-e72e-2c8c-9f01-25dafeddf472", "7ab2a9c7-ebc6-2056-8973-f34c559f7e0d", "569d8f0b-72aa-2f24-8bfb-8159cfb5e7f4", "6bde609f-9162-246f-8c79-3b26507f2ffd", "f62fd5fd-9a3f-2f44-883a-1e5cf819608e", "5104a9c9-adc4-2a85-917e-92cb27d635fb", "8f0f1437-55de-28ce-828e-dbf210a7f472", "87e6cf71-9d1a-289f-8510-bddeda7aaad8", "c92fb57e-f771-2064-86e4-5f4c7c77a8c7", "aa20278c-8bc8-2c5b-9d18-016312e4463a", "210cdbc9-9e8d-2832-849c-9a10b4c87123", "6ed38506-7db9-2d45-826e-645bbd9614e6", "4d3d829e-8cf4-2e04-8318-b76f02d91c93", "5341b7a5-8a66-2cdd-8751-70b98263cb8d", "c92fb59e-f771-2064-8431-6a20d3396067", "6a360567-fa53-2915-9617-da8aa432752d", "1mp3d_0066_region18", "1d234026-e280-2b1a-8fe1-713e28269f4d", "1mp3d_0038_region26", "scene0672_00", "35788e22-696e-2374-8898-bc5d06109906", "scene0598_02", "scene0386_00", "scene0080_01", "8f0f1463-55de-28ce-80e5-d3294f7795ba", "c12890d1-d3df-2d0d-868c-f9a62ca423f7", "scene0516_00", "10b17948-3938-2467-8bb9-fcd621974410", "1mp3d_0078_region5", "752cc57d-920c-26f5-8db9-9831c97088fe", "a8952593-9035-254b-8f40-bc82e6bcbbb1", "ad408ca7-84db-2095-898d-5a3449578bb2", "scene0493_00", "scene0223_02", "1mp3d_0067_region30", "0ad2d39d-79e2-2212-99aa-1e95c5b94289", "scene0542_00", "1mp3d_0025_region11", "0cac7672-8d6f-2d13-8d84-a0418e452bb7", "68bae76c-3567-2f7c-827d-373035a2d942", "43b8caf1-6678-2e38-9ab4-0bdde7337aac", "scene0160_04", "scene0096_00", "scene0697_02", "scene0343_00", "scene0182_00", "scene0552_01", "scene0536_00", "scene0691_00", "scene0282_01", "scene0652_00", "scene0629_01", "scene0440_01", "scene0292_00", "scene0089_00", "scene0654_00", "scene0018_00", "scene0301_01", "scene0562_00", "scene0241_01", "scene0626_02", "scene0451_04", "scene0373_01", "scene0375_01", "scene0526_01", "scene0639_00", "scene0337_02", "scene0233_01", "scene0620_00", "scene0554_01", "scene0581_02", "scene0278_01", "scene0182_02", "scene0225_00", "scene0588_03", "scene0404_00", "scene0205_00", "scene0434_01", "scene0027_00", "scene0055_00", "scene0192_01", "scene0702_01", "scene0017_01", "scene0171_01", "scene0065_01", "scene0704_01", "scene0268_02", "scene0609_01", "scene0470_00", "scene0115_00", "scene0495_00", "scene0178_00", "scene0205_02", "scene0412_01", "scene0141_00", "scene0432_01", "scene0539_01", "scene0157_01", 
"scene0424_02", "scene0277_02", "scene0045_01", "scene0353_01", "scene0529_00", "scene0073_00", "scene0133_00", "scene0043_01", "scene0581_00", "scene0168_01", "scene0277_00", "scene0698_01", "scene0469_02", "scene0257_00", "scene0184_00", "scene0544_00", "scene0027_02", "scene0161_00", "scene0610_01", "scene0630_03", "scene0606_02", "scene0053_00", "scene0509_02", "scene0331_00", "scene0345_00", "scene0247_01", "scene0311_00", "scene0115_02", "scene0588_01", "scene0308_00", "scene0328_00", "scene0261_03", "scene0355_01", "scene0450_00", "scene0479_01", "scene0500_01", "scene0469_00", "scene0587_00", "scene0688_00", "scene0251_00", "scene0396_01", "scene0038_02", "scene0510_02", "scene0466_01", "scene0038_00", "scene0509_00", "scene0031_01", "scene0399_00", "scene0248_00", "scene0449_02", "scene0673_04", "scene0510_00", "scene0606_00", "scene0529_02", "scene0147_00", "scene0123_01", "scene0158_00", "scene0317_00", "scene0662_01", "scene0404_02", "scene0099_01", "scene0203_02", "scene0591_01", "scene0664_01", "scene0141_02", "scene0086_01", "scene0642_03", "scene0476_02", "scene0167_00", "scene0215_01", "scene0158_02", "scene0600_00", "scene0096_02", "scene0642_01", "scene0089_02", "scene0261_01", "scene0449_00", "scene0105_01", "scene0402_00", "scene0294_00", "scene0609_03", "scene0365_00", "scene0113_00", "scene0456_00", "scene0001_00", "scene0294_02", "scene0380_02", "scene0572_01", "scene0697_00", "scene0616_01", "scene0630_01", "scene0574_01", "scene0626_00", "scene0587_02", "scene0090_00", "scene0307_01", "scene0203_00", "scene0007_00", "scene0011_01", "scene0248_02", "scene0598_00", "scene0422_00", "scene0151_01", "scene0363_00", "scene0103_01", "scene0177_01", "scene0476_00", "scene0073_02", "scene0021_00", "scene0161_02", "scene0268_00", "scene0564_00", "scene0365_02", "scene0600_02", "scene0338_01", "scene0424_00", "scene0674_00", "scene0619_00", "scene0520_01", "scene0536_02", "scene0337_00", "scene0459_01", "scene0597_01", "scene0075_00", "scene0271_00", "scene0055_02", "scene0380_00", "scene0446_01", "scene0135_00", "scene0530_00", "scene0223_00", "1mp3d_0013_region19", "1mp3d_0001_region1", "1mp3d_0070_region0", "1mp3d_0005_region23", "1mp3d_0043_region21", "1mp3d_0065_region24", "1mp3d_0048_region2", "1mp3d_0016_region8", "1mp3d_0035_region25", "1mp3d_0040_region10", "1mp3d_0057_region1", "1mp3d_0039_region38", "1mp3d_0069_region8", "1mp3d_0048_region17", "1mp3d_0050_region10", "1mp3d_0045_region2", "1mp3d_0064_region5", "1mp3d_0000_region11", "1mp3d_0030_region17", "1mp3d_0013_region0", "1mp3d_0039_region35", "1mp3d_0069_region41", "1mp3d_0033_region12", "1mp3d_0018_region16", "1mp3d_0018_region4", "1mp3d_0010_region3", "1mp3d_0065_region12", "1mp3d_0035_region28", "1mp3d_0013_region2", "1mp3d_0039_region40", "1mp3d_0005_region17", "1mp3d_0043_region15", "1mp3d_0000_region13", "1mp3d_0039_region76", "1mp3d_0067_region4", "1mp3d_0059_region0", "1mp3d_0057_region3", "1mp3d_0060_region19", "1mp3d_0026_region0", "1mp3d_0039_region42", "1mp3d_0036_region22", "1mp3d_0018_region6", "1mp3d_0043_region9", "1mp3d_0008_region19", "1mp3d_0039_region79", "1mp3d_0023_region12", "1mp3d_0003_region19", "1mp3d_0038_region29", "1mp3d_0084_region1", "1mp3d_0001_region39", "1mp3d_0012_region31", "1mp3d_0013_region14", "1mp3d_0018_region20", "1mp3d_0062_region1", "1mp3d_0076_region15", "1mp3d_0006_region10", "1mp3d_0026_region19", "1mp3d_0023_region10", "1mp3d_0075_region7", "1mp3d_0025_region1", "1mp3d_0087_region0", "1mp3d_0030_region15", "1mp3d_0043_region6", "1mp3d_0003_region22", 
"1mp3d_0013_region20", "1mp3d_0029_region35", "1mp3d_0035_region13", "1mp3d_0003_region16", "1mp3d_0026_region2", "1mp3d_0016_region10", "1mp3d_0001_region36", "1mp3d_0052_region6", "1mp3d_0026_region14", "1mp3d_0001_region3", "1mp3d_0066_region17", "1mp3d_0032_region7", "1mp3d_0033_region24", "1mp3d_0004_region6", "1mp3d_0069_region7", "1mp3d_0069_region39", "1mp3d_0078_region11", "1mp3d_0035_region11", "1mp3d_0084_region3", "1mp3d_0040_region24", "1mp3d_0038_region12", "1mp3d_0065_region10", "1mp3d_0069_region36", "1mp3d_0004_region9", "1mp3d_0060_region20", "1mp3d_0038_region24", "1mp3d_0069_region5", "1mp3d_0026_region22", "1mp3d_0002_region2", "1mp3d_0060_region16", "1mp3d_0048_region15", "1mp3d_0036_region16", "1mp3d_0060_region14", "1mp3d_0066_region23", "1mp3d_0066_region15", "1mp3d_0064_region8", "1mp3d_0043_region17", "1mp3d_0003_region20", "1mp3d_0044_region46", "1mp3d_0040_region7", "1mp3d_0013_region22", "1mp3d_0040_region12", "1mp3d_0033_region26", "1mp3d_0043_region4", "1mp3d_0009_region9", "1mp3d_0038_region10", "1mp3d_0025_region3", "1mp3d_0069_region43", "1mp3d_0036_region19", "1mp3d_0040_region8", "1mp3d_0004_region4", "1mp3d_0078_region7", "1mp3d_0043_region23", "1mp3d_0002_region0", "1mp3d_0039_region37", "1mp3d_0081_region4", "1mp3d_0073_region1", "1mp3d_0064_region7", "1mp3d_0018_region22", "1mp3d_0066_region21", "1mp3d_0075_region8", "1mp3d_0026_region20", "1mp3d_0039_region1", "1mp3d_0018_region14", "1mp3d_0069_region34", "1mp3d_0061_region2", "1mp3d_0065_region26", "1mp3d_0018_region19", "1mp3d_0023_region7", "1mp3d_0048_region0", "1mp3d_0036_region20", "1mp3d_0003_region14", "1mp3d_0025_region13", "1mp3d_0013_region16", "1mp3d_0039_region3", "1mp3d_0032_region5", "1mp3d_0044_region31", "1mp3d_0040_region53", "1mp3d_0075_region5", "1mp3d_0006_region12", "1mp3d_0026_region16", "1mp3d_0082_region8", "1mp3d_0046_region1", "1mp3d_0035_region50", "1mp3d_0040_region51", "1mp3d_0032_region8", "1mp3d_0033_region10", "1mp3d_0041_region35", "1mp3d_0009_region4", "1mp3d_0044_region44", "1mp3d_0039_region74", "1mp3d_0067_region6", "1mp3d_0016_region5", "1mp3d_0087_region2", "1mp3d_0010_region1", "1mp3d_0050_region12", "1mp3d_0040_region29", "1mp3d_0005_region21", "1mp3d_0078_region13", "1mp3d_0044_region33", "1mp3d_0062_region3", "1mp3d_0061_region0", "1mp3d_0016_region7", "1mp3d_0005_region15", "1mp3d_0030_region18", "1mp3d_0010_region11", "1mp3d_0067_region9", "1mp3d_0082_region5", "1mp3d_0082_region7", "1mp3d_0010_region13", "1mp3d_0017_region35", "1mp3d_0073_region3", "1mp3d_0076_region4", "1mp3d_0046_region11", "1mp3d_0009_region6", "1mp3d_0018_region9", "1mp3d_0005_region18", "1mp3d_0043_region18", "1mp3d_0045_region0", "1mp3d_0001_region34", "1mp3d_0041_region38", "1mp3d_0012_region33", "1mp3d_0012_region44", "1mp3d_0033_region29", "1mp3d_0048_region18", "1mp3d_0078_region8", "1mp3d_0073_region11", "1mp3d_0059_region2", "1mp3d_0073_region13", "1mp3d_0076_region9", "1mp3d_0036_region14", "1mp3d_0076_region6", "1mp3d_0001_region43", "1mp3d_0023_region8", "1mp3d_0001_region41", "1mp3d_0052_region4", "1mp3d_0040_region5", "1mp3d_0046_region13", "1mp3d_0035_region27", "1mp3d_0016_region12", "1mp3d_0040_region26", "1mp3d_0023_region5", "1mp3d_0008_region16", "1mp3d_0046_region3", "1mp3d_0008_region14", "4d3d82a4-8cf4-2e04-8144-95d355266160", "8eabc443-5af7-2f32-866f-bd64e5df6196", "bcb0fe1b-4f39-2c70-9f8c-2256ea9752ab", "c92fb582-f771-2064-86b3-52c04298e4e6", "bf9a3dbc-45a5-2e80-80d0-6dc2f6a6f1e9", "baf673a8-8e94-22f8-837d-d39420725d90", 
"5630cfda-12bf-2860-8511-9baf30eec4ad", "1776ad82-4db7-2333-89e4-d73159ac81d0", "b8837e41-57ec-29c6-8a7e-3cf8b52de8b6", "80b85893-4a8d-222f-8519-a9d06c205653", "05c6ede7-2e69-23b1-8b27-c1cb868f1938", "0cac7538-8d6f-2d13-8c23-d635c21d0f17", "f2c76feb-2239-29d0-8418-72b6051fc144", "20c993cf-698f-29c5-86c7-e2ee784936a1", "55551075-36f1-29c0-8b64-97a4e71b90ba", "ddc73793-765b-241a-9ecd-b0cebb7cf916", "8eabc426-5af7-2f32-87bb-a16609b099e3", "8eabc401-5af7-2f32-85fe-77e22d8d97f8", "0cac75ad-8d6f-2d13-8c74-5de4dfc4affc", "7ab2a9cb-ebc6-2056-8ba2-7835e43d47d3", "8eabc42a-5af7-2f32-855e-f114989e7025", "0cac7604-8d6f-2d13-8e39-784396ee3b8c", "6993478c-1286-2e5b-8023-181d6efc14f9", "b05fdd5a-fca0-2d4f-89fd-d54bd26a115b", "569d8f20-72aa-2f24-8a85-2d43d390ff9d", "c7895f09-339c-2d13-830d-e150ff0df2b3", "20c993c9-698f-29c5-850e-2a93df894437", "d73fd1da-8aae-2838-81c0-a7482e6a76f5", "7272e182-a01b-20f6-89b8-3bdec0091c89", "095821f7-e2c2-2de1-9568-b9ce59920e29", "ad408c93-84db-2095-8829-6100d1b70d80", "b901681d-e754-293c-8cb3-22aae72dbd42", "2e36955f-e133-204c-9372-e883fa503f74", "ad408c95-84db-2095-8af5-735ff2eb50f5", "751a55a5-fe61-2c3b-8d49-350e8169e05e", "6bde60e6-9162-246f-8d7f-823d12b5e358", "77361fbc-d054-2a22-8bd1-20da69ee28dc", "531cfefe-0021-28f6-8c6c-35ae26d2158f", "09582246-e2c2-2de1-95ae-2afbd2e21231", "6bde6057-9162-246f-8d07-dfba45622e09", "13124cbe-cec3-27a6-8745-6e02a03494d2", "b05fdd68-fca0-2d4f-895a-2f40f6b4aca9", "55551081-36f1-29c0-88ad-9518e9fd2f4f", "22312382-b828-2c09-9267-bad5dd60ac79", "c7895f5f-339c-2d13-82db-e99a08b5f679", "8f0f142e-55de-28ce-8073-1629a4573a6a", "bf9a3dac-45a5-2e80-8073-0fe4e80c0e99", "137a8158-1db5-2cc0-8003-31c12610471e", "4a9a43eb-7736-2874-862e-a4c23d88831f", "b05fddd9-fca0-2d4f-8904-936aa0b678e6", "0cac7619-8d6f-2d13-8f36-ac562ec9a4de", "7794146a-cfdf-29cb-8705-f3f89d9ed3ae", "bf9a3dd3-45a5-2e80-8163-8a5b2573aca4", "48005c67-7d67-29ec-85e7-ad30a2aac1e0", "1d233ff6-e280-2b1a-8ed5-3eec62f9bb3f", "7ab2a9bf-ebc6-2056-8bd5-903a96eb7e99", "4731977c-f9f7-2a1a-976c-34c48a84405c", "5341b7b3-8a66-2cdd-856d-9d70e194568b", "4a9a43e0-7736-2874-87ac-c589da4d0f00", "c7895f91-339c-2d13-8095-ca529bcb8be5", "4138584b-a238-2435-8128-a939fb07c1c8", "d7d40d64-7a5d-2b36-9760-5e432257897d", "c92fb573-f771-2064-87cc-2fd7faae29e0", "10b1793c-3938-2467-897f-85961a4b3677", "352e9c55-69fb-27a7-8bac-3b46e5b77c10", "c7895f8f-339c-2d13-81a3-84bc18b475b5", "0cac7558-8d6f-2d13-8fe1-c8af0362735d", "b9016821-e754-293c-8f2b-832f5cb6f0db", "75c25973-9ca2-2844-96f4-90cd531364ac", "752cc581-920c-26f5-8e51-9520e8441118", "1dd720a1-2ba0-22d9-8b6e-bb00c888a414", "8e0f1c2f-9e28-2339-85ae-05fc50d1a3a7", "0cac7597-8d6f-2d13-8c9c-6f8542021d5c", "5341b7d7-8a66-2cdd-8413-00cf17af67ba", "787ed58a-9d98-2c97-826e-8e98355c30d7", "422885b3-192d-25fc-84c9-9b80eea1752d", "8eabc403-5af7-2f32-84aa-ecb06ef26b8b", "dc42b382-8d5c-2d2a-8424-d19d7bc2eec3", "28298cd3-15b5-24be-89cc-1ed5870c1f57", "d63767c1-3205-226c-9a47-70f388b93739", "185d741b-3698-223c-8ba0-48db6ecbe220", "6bde607d-9162-246f-8f84-98cf7ac2374c", "0cac7595-8d6f-2d13-8cad-f51af4c9aba2", "8f0f1434-55de-28ce-80ee-1f84b796e1cc", "c92fb5a4-f771-2064-87c5-f2d2162ceae7", "1c211548-f201-2d25-8686-8e8069116451", "0cac7629-8d6f-2d13-8e5a-9f17681451c8", "c8b34d04-5ea0-2d69-9284-486dc8fab63f", "752cc59d-920c-26f5-8d45-d78b49fe63f3", "d7d40d6e-7a5d-2b36-9548-8c19e9b50063", "bcb0fe2d-4f39-2c70-9d1a-49a4a6868d7d", "6ed384fe-7db9-2d45-8086-01bab3279730", "77361fc6-d054-2a22-8971-e4faf5504a44", "43b8cae9-6678-2e38-9bc2-8c31ab832242", 
"6a360551-fa53-2915-9739-7b7c1857bed3", "6bde60c0-9162-246f-8d1f-32543babecfb", "eee5b052-ee2d-28f4-99fd-c3c5380db25e", "5555107f-36f1-29c0-8903-9b66fb2301d0", "0f2f2711-b736-2f71-8c6a-245a9d820a8c", "752cc583-920c-26f5-8fcc-02c767693c60", "02b33e03-be2b-2d54-9129-5d28efdd68fa", "bf9a3ddd-45a5-2e80-80bc-647365c7ca08", "5630cfcf-12bf-2860-8784-83d28a611a83", "4a9a43e4-7736-2874-8479-0e81135ccdb0", "ddc73799-765b-241a-9c30-f75dcb7627d4", "8eabc409-5af7-2f32-85b1-54d9794543c9", "7272e190-a01b-20f6-8bde-769bcd548633", "0cac75ea-8d6f-2d13-8e50-c5faf0159e32", "355465d4-d29b-29f9-9550-e366a10d6924", "4fbad31e-465b-2a5d-84b7-c0ddea978db4", "2451c057-fae8-24f6-92b8-df89e0429f6e", "742e8f15-be0a-294e-9ebb-6c72dbcb9662", "4e858c81-fd93-2cb4-8469-d9226116b5de", "ddc737b3-765b-241a-9c35-6b7662c04fc9", "8eabc416-5af7-2f32-862f-bc1a368d2932", "5630cfcb-12bf-2860-87ee-b4e4a5bf0cb0", "b8837e1c-57ec-29c6-8900-ce1eaa76e959", "9766cbfd-6321-2e2f-8348-7da212a30beb", "b8837e16-57ec-29c6-89c3-0e8b7d668f56", "baf6738d-8e94-22f8-83db-20528ddbef86", "422885c1-192d-25fc-8798-e8a7aab48121", "422885c7-192d-25fc-85f5-67ba0d80ade5", "501ebf0b-a3bb-263f-86fd-7ef000a19588", "6bde6070-9162-246f-8ea9-c8bbe5d7133a", "4a9a43d4-7736-2874-87a6-0c3089281af8", "c7895f42-339c-2d13-814f-994062b318b0", "95be45d9-a558-22da-9d6f-3d380c02f40e", "6a36054b-fa53-2915-946e-4ec15f811f6e", "80b85891-4a8d-222f-85d0-2b8bf16d79a8", "0cac75ce-8d6f-2d13-8cf1-add4e795b9b0", "4d3d82a6-8cf4-2e04-828b-ceb5235b58a8", "55551093-36f1-29c0-8b29-f80bcfdcc5df", "6a360541-fa53-2915-9432-e4b7e065aa73", "0cac761b-8d6f-2d13-8f16-23a7d73c54fe", "355465d0-d29b-29f9-94f2-7707ac505bae", "7ab2a9cd-ebc6-2056-88a0-7f85836edb91", "bf9a3dd9-45a5-2e80-817c-f918e193231b", "422885d4-192d-25fc-862e-b0cc5a8c13e1", "d7d40d40-7a5d-2b36-977c-4e35fdd5f03a", "5104a9c7-adc4-2a85-9026-45557dcf9a87", "b05fddcf-fca0-2d4f-8a01-c6bdcc9fba8d", "0cac7599-8d6f-2d13-8e7f-4217750a64cc", "283ccfed-107c-24d5-8b72-5f6004ef4f94", "ba6fdaa0-a4c1-2dca-80a9-df196c04fcc8", "5555108d-36f1-29c0-8b37-5efa2bef59d4", "2e36955d-e133-204c-90da-122ae14d42a3", "4fbad329-465b-2a5d-8401-a3f550ef3de5", "0cac7644-8d6f-2d13-8e31-4a5219d710b9", "ebc42044-82a4-2113-8789-1c8c8bb7cbcd", "7272e189-a01b-20f6-8a2e-05b6c8395143", "47319776-f9f7-2a1a-9461-d2e201735263", "6bde605b-9162-246f-8f73-3a0e3627db01", "1d233fea-e280-2b1a-8f15-cd60d12ae197", "422885ce-192d-25fc-851a-df2d675a6559", "4138583f-a238-2435-8304-d0ccada0d1c6", "8eabc451-5af7-2f32-87b5-026aa18e3190", "02b33df9-be2b-2d54-9062-1253be3ce186", "10b17946-3938-2467-8bd9-0cc6be6dff91", "ad408ca9-84db-2095-8afa-cc17150ea346", "569d8f1c-72aa-2f24-89bb-0df7a0653c26", "8eabc414-5af7-2f32-8797-72769173455b", "0cac762b-8d6f-2d13-8de2-832a3c07864b", "scene0216_00", "1mp3d_0061_region23", "scene0239_01", "1mp3d_0036_region3", "1mp3d_0061_region15", "1mp3d_0003_region3", "scene0525_02", "2ea047d1-aeca-2021-8a44-8ca8d08fe50b", "scene0463_00", "1mp3d_0029_region0", "scene0139_00", "09582223-e2c2-2de1-94b6-750684b4f80a", "1mp3d_0071_region3", "scene0181_01", "1mp3d_0022_region4", "1mp3d_0035_region38", "751a55a7-fe61-2c3b-8caa-1aa9dfb86ece", "1mp3d_0002_region23", "scene0287_00", "scene0197_02", "scene0567_01", "scene0304_00", "scene0187_01", "scene0206_01", "scene0383_01", "scene0334_01", "scene0095_01", "scene0594_00", "scene0356_00", "scene0297_01", "scene0244_00", "scene0324_00", "scene0050_01", "scene0465_00", "scene0032_00", "scene0012_02", "scene0489_01", "scene0340_01", "scene0486_00", "scene0421_01", "scene0551_00", "scene0416_04", "scene0445_00", "scene0079_00", 
"scene0210_00", "scene0136_01", "scene0034_02", "scene0106_02", "scene0226_01", "scene0692_01", "scene0242_02", "scene0648_01", "scene0635_00", "scene0191_00", "scene0568_02", "scene0152_00", "scene0474_05", "scene0346_01", "scene0667_02", "scene0012_00", "scene0126_00", "scene0623_01", "scene0547_01", "scene0066_00", "scene0505_02", "scene0701_00", "scene0431_00", "scene0040_00", "scene0060_00", "scene0083_00", "scene0678_02", "scene0236_00", "scene0370_00", "scene0209_02", "scene0264_02", "scene0298_00", "scene0408_00", "scene0356_02", "scene0119_00", "scene0533_01", "scene0568_00", "scene0393_00", "scene0110_01", "scene0200_01", "scene0558_01", "scene0418_01", "scene0172_00", "scene0548_00", "scene0661_00", "scene0515_01", "scene0022_01", "scene0154_00", "scene0407_01", "scene0350_00", "scene0411_00", "scene0677_01", "scene0322_00", "scene0428_00", "scene0437_00", "scene0272_01", "scene0046_00", "scene0584_01", "scene0557_02", "scene0625_01", "scene0571_00", "scene0395_02", "scene0525_00", "scene0633_00", "scene0229_00", "scene0174_00", "scene0369_02", "scene0164_01", "scene0385_01", "scene0692_03", "scene0370_02", "scene0274_01", "scene0059_02", "scene0603_01", "scene0109_01", "scene0376_00", "scene0475_01", "scene0359_01", "scene0188_00", "scene0164_03", "scene0350_02", "scene0085_00", "scene0577_00", "scene0694_01", "scene0667_00", "scene0503_00", "scene0578_01", "scene0376_02", "scene0181_03", "scene0254_01", "scene0242_00", "scene0116_01", "scene0651_01", "scene0701_02", "scene0291_01", "scene0106_00", "scene0548_02", "scene0002_01", "scene0523_00", "scene0523_02", "scene0647_00", "scene0101_04", "scene0152_02", "scene0678_00", "scene0220_01", "scene0144_01", "scene0605_01", "scene0126_02", "scene0443_00", "scene0641_00", "scene0393_02", "scene0671_01", "scene0395_00", "scene0034_00", "scene0505_00", "scene0281_00", "scene0046_02", "scene0473_01", "scene0191_02", "scene0312_01", "scene0288_01", "scene0100_02", "scene0499_00", "scene0056_01", "scene0684_00", "scene0014_00", "scene0411_02", "scene0453_01", "scene0262_00", "scene0302_00", "scene0024_01", "scene0682_00", "scene0613_00", "scene0541_01", "scene0093_01", "scene0120_00", "scene0209_00", "scene0059_00", "scene0142_01", "scene0230_00", "scene0100_00", "scene0613_02", "scene0582_01", "scene0349_00", "scene0332_01", "scene0561_01", "scene0592_00", "scene0417_00", "scene0480_00", "scene0557_00", "scene0615_00", "scene0197_00", "scene0264_00", "scene0658_00", "scene0229_02", "scene0369_00", "1mp3d_0019_region23", "1mp3d_0019_region15", "1mp3d_0013_region32", "1mp3d_0027_region1", "1mp3d_0072_region0", "1mp3d_0065_region6", "1mp3d_0059_region16", "1mp3d_0084_region12", "1mp3d_0047_region2", "1mp3d_0050_region9", "1mp3d_0012_region3", "1mp3d_0004_region16", "1mp3d_0047_region29", "1mp3d_0006_region4", "1mp3d_0024_region2", "1mp3d_0002_region17", "1mp3d_0044_region15", "1mp3d_0001_region26", "1mp3d_0017_region13", "1mp3d_0008_region5", "1mp3d_0057_region24", "1mp3d_0039_region13", "1mp3d_0017_region4", "1mp3d_0008_region7", "1mp3d_0050_region4", "1mp3d_0061_region18", "1mp3d_0067_region14", "1mp3d_0035_region40", "1mp3d_0014_region20", "1mp3d_0041_region4", "1mp3d_0032_region11", "1mp3d_0062_region26", "1mp3d_0044_region23", "1mp3d_0042_region20", "1mp3d_0017_region6", "1mp3d_0057_region26", "1mp3d_0002_region18", "1mp3d_0077_region14", "1mp3d_0012_region23", "1mp3d_0065_region4", "1mp3d_0041_region27", "1mp3d_0005_region31", "1mp3d_0082_region11", "1mp3d_0013_region45", "1mp3d_0064_region13", "1mp3d_0044_region21", 
"1mp3d_0017_region9", "1mp3d_0044_region17", "1mp3d_0062_region12", "1mp3d_0042_region14", "1mp3d_0022_region28", "1mp3d_0062_region10", "1mp3d_0002_region21", "1mp3d_0005_region5", "1mp3d_0027_region23", "1mp3d_0030_region7", "1mp3d_0033_region6", "1mp3d_0014_region14", "1mp3d_0041_region9", "1mp3d_0006_region6", "1mp3d_0085_region2", "1mp3d_0059_region19", "1mp3d_0079_region10", "1mp3d_0029_region27", "1mp3d_0044_region3", "1mp3d_0033_region34", "1mp3d_0014_region5", "1mp3d_0069_region12", "1mp3d_0011_region0", "1mp3d_0062_region24", "1mp3d_0006_region9", "1mp3d_0004_region22", "1mp3d_0060_region3", "1mp3d_0014_region16", "1mp3d_0069_region29", "1mp3d_0062_region29", "1mp3d_0027_region3", "1mp3d_0074_region6", "1mp3d_0005_region7", "1mp3d_0047_region12", "1mp3d_0035_region35", "1mp3d_0011_region2", "1mp3d_0005_region33", "1mp3d_0040_region39", "1mp3d_0012_region15", "1mp3d_0019_region18", "1mp3d_0072_region10", "1mp3d_0039_region69", "1mp3d_0067_region16", "1mp3d_0047_region0", "1mp3d_0032_region25", "1mp3d_0001_region10", "1mp3d_0079_region4", "1mp3d_0069_region10", "1mp3d_0013_region30", "1mp3d_0041_region6", "1mp3d_0061_region21", "1mp3d_0005_region8", "1mp3d_0042_region16", "1mp3d_0074_region11", "1mp3d_0057_region10", "1mp3d_0014_region19", "1mp3d_0022_region11", "1mp3d_0022_region25", "1mp3d_0033_region9", "1mp3d_0071_region21", "1mp3d_0014_region8", "1mp3d_0029_region2", "1mp3d_0064_region11", "1mp3d_0041_region13", "1mp3d_0039_region66", "1mp3d_0044_region1", "1mp3d_0088_region0", "1mp3d_0039_region28", "1mp3d_0047_region26", "1mp3d_0041_region11", "1mp3d_0042_region7", "1mp3d_0079_region12", "1mp3d_0033_region4", "1mp3d_0077_region5", "1mp3d_0082_region13", "1mp3d_0027_region18", "1mp3d_0012_region1", "1mp3d_0038_region2", "1mp3d_0071_region18", "1mp3d_0001_region29", "1mp3d_0014_region7", "1mp3d_0039_region25", "1mp3d_0077_region16", "1mp3d_0039_region50", "1mp3d_0044_region18", "1mp3d_0074_region4", "1mp3d_0001_region53", "1mp3d_0077_region8", "1mp3d_0026_region30", "1mp3d_0041_region28", "1mp3d_0027_region21", "1mp3d_0084_region10", "1mp3d_0035_region0", "1mp3d_0038_region0", "1mp3d_0011_region12", "1mp3d_0047_region10", "1mp3d_0035_region2", "1mp3d_0067_region19", "1mp3d_0058_region1", "1mp3d_0002_region15", "1mp3d_0040_region34", "1mp3d_0013_region47", "1mp3d_0024_region0", "1mp3d_0085_region0", "1mp3d_0017_region27", "1mp3d_0059_region14", "1mp3d_0027_region15", "1mp3d_0032_region13", "1mp3d_0071_region1", "1mp3d_0027_region17", "1mp3d_0077_region7", "1mp3d_0074_region9", "1mp3d_0066_region5", "1mp3d_0022_region27", "1mp3d_0030_region5", "1mp3d_0041_region25", "1mp3d_0029_region25", "1mp3d_0013_region48", "1mp3d_0083_region4", "1mp3d_0003_region1", "1mp3d_0019_region7", "1mp3d_0079_region9", "1mp3d_0035_region37", "1mp3d_0029_region13", "1mp3d_0035_region42", "1mp3d_0019_region5", "1mp3d_0042_region19", "1mp3d_0067_region20", "1mp3d_0017_region28", "1mp3d_0004_region19", "1mp3d_0050_region6", "1mp3d_0014_region22", "1mp3d_0040_region43", "1mp3d_0040_region41", "1mp3d_0011_region10", "1mp3d_0069_region26", "1mp3d_0061_region17", "1mp3d_0030_region8", "1mp3d_0029_region28", "1mp3d_0004_region20", "1mp3d_0032_region27", "1mp3d_0022_region13", "1mp3d_0029_region11", "1mp3d_0072_region2", "1mp3d_0039_region64", "1mp3d_0066_region7", "1mp3d_0042_region5", "1mp3d_0019_region8", "1mp3d_0017_region11", "1mp3d_0012_region17", "1mp3d_0000_region2", "1mp3d_0042_region22", "1mp3d_0074_region13", "1mp3d_0071_region15", "1mp3d_0001_region12", "1mp3d_0057_region12", "1mp3d_0079_region6", 
"1mp3d_0001_region24", "1mp3d_0069_region24", "1mp3d_0017_region25", "1mp3d_0039_region27", "1mp3d_0004_region14", "1mp3d_0008_region8", "1mp3d_0066_region8", "1mp3d_0067_region22", "1mp3d_0036_region1", "1mp3d_0039_region52", "1mp3d_0040_region36", "1mp3d_0012_region18", "1mp3d_0000_region0", "1mp3d_0012_region21", "1mp3d_0019_region17", "1mp3d_0039_region11", "1mp3d_0038_region34", "1mp3d_0065_region9", "1mp3d_0022_region9", "1mp3d_0019_region21", "1mp3d_0042_region8", "1mp3d_0047_region24", "1mp3d_0071_region17", "1mp3d_0022_region6", "1mp3d_0060_region1", "dc42b36c-8d5c-2d2a-86aa-19a8929361fd", "c92fb5b5-f771-2064-8570-dbe16cb33764", "bf9a3dbe-45a5-2e80-80ee-f78c2b525234", "dbeb4d1f-faf9-2324-98d1-605c3c0c0658", "87e6cf7b-9d1a-289f-8692-57e5757dac99", "c2d99349-1947-2fbf-837e-a0bd5e027c52", "bf9a3d9c-45a5-2e80-832c-618e55d4e705", "ad408c8b-84db-2095-8b87-5c3079e807fe", "5341b7d3-8a66-2cdd-8633-0a3da632befa", "fcf66db6-622d-291c-8740-9e40c21de689", "c7895f7c-339c-2d13-819f-3bb0b26c91f6", "0cac753c-8d6f-2d13-8e27-e0664fc33bb9", "ad408cb1-84db-2095-8a03-942866890a31", "422885d9-192d-25fc-85de-b954f526b2ac", "1d234022-e280-2b1a-8cec-e9787bb0d7b2", "c92fb598-f771-2064-876d-327776fda299", "77941460-cfdf-29cb-86c7-1f60e2ecd07a", "6bde603f-9162-246f-8ca4-5bb5c9fe9974", "ad408c97-84db-2095-88e0-de11e52a06ef", "0cac7648-8d6f-2d13-8e30-76663c19baa4", "4d3d82a0-8cf4-2e04-800f-97deb20e860b", "ba6fdaac-a4c1-2dca-8380-f16765679fd7", "b1d87fb8-e72e-2c8c-9eda-712d51a0175f", "c9fb7aa7-2a5b-2cf7-916d-d38eeeeda87f", "ddc737a9-765b-241a-9dd7-bf14079f4804", "fcf66db0-622d-291c-8734-2a41fae7deb2", "5341b7a7-8a66-2cdd-86e5-6b641b0004d2", "47319778-f9f7-2a1a-9684-445309ac8cf9", "2e36954d-e133-204c-92aa-5cf8c2f8b46f", "7747a51f-9431-24e8-8720-19e267b3b828", "6bde6049-9162-246f-8c0e-25e1e80c0a45", "cdcaf5bb-ddd8-2ed6-9714-55394577db57", "1d23401e-e280-2b1a-8cef-f28df9df2618", "bcb0fe23-4f39-2c70-9d69-698c5ff4435c", "1776ad7c-4db7-2333-88b3-edb7a9803f6f", "422885de-192d-25fc-8753-c094c3aaae0b", "b8837e38-57ec-29c6-88a0-58602a876ed0", "7747a4ec-9431-24e8-848f-897279a1e9fe", "c92fb5af-f771-2064-8476-871572e38970", "7747a50c-9431-24e8-877d-e60c3a341cc2", "422885e2-192d-25fc-86cd-076ee5f2b412", "422885c5-192d-25fc-85e6-12a3d65c8e7b", "8eabc44f-5af7-2f32-87af-ff14de136a7c", "bf9a3de7-45a5-2e80-81a4-fd6126f6417b", "f2c76fe9-2239-29d0-87ec-f2c7ced812c1", "8eabc46d-5af7-2f32-85bd-f957e754c064", "1d23400e-e280-2b1a-8dec-67a551146c3a", "634b2181-f5d0-2fb7-8547-fd27b0795137", "20c993c7-698f-29c5-8685-0d1a2a4a3496", "8eabc42e-5af7-2f32-862f-b120deba1f93", "a0905fdd-66f7-2272-9cdb-89360888ea67", "1776ad86-4db7-2333-8935-240e44ccb16d", "8eabc447-5af7-2f32-8712-301083e291b3", "c7895f46-339c-2d13-81c6-395674134ef2", "a0905ff2-66f7-2272-9ec9-303aefe3713c", "a0905ffe-66f7-2272-9c39-02f47d8bbb0f", "77361fd4-d054-2a22-88c4-b5b404f904ca", "bf9a3ddf-45a5-2e80-8007-8e9e7f323e52", "d7d40d44-7a5d-2b36-96d7-23cb75af862f", "cdcaf5bd-ddd8-2ed6-97c3-489e105e4dde", "1c211552-f201-2d25-87ce-81e360c07b4a", "8eabc422-5af7-2f32-860b-db7caac59e10", "f24d4c1d-6e46-207b-858d-0aec6dfe1c14", "18d4d91f-7eb5-280e-8757-1eb6f43ecfd2", "6bde6097-9162-246f-8d95-70c71505f2e3", "634d11d7-6833-255d-8c1e-2a0af54ac101", "422885bf-192d-25fc-8509-1fd7def9cc19", "d7dc987e-a34a-2794-85c8-f75389b27532", "68bae772-3567-2f7c-804c-d77a47cdc508", "d7d40d37-7a5d-2b36-97cc-108f3c8e07f9", "c6942e7f-695b-27bf-8145-4b47b41edad6", "0cac7534-8d6f-2d13-8de7-8a915ed90050", "48699c02-d4a2-2f10-81c3-552d06ffcbb8", "4a9a43d6-7736-2874-84c3-86ec156a3b2d", 
"6bde6053-9162-246f-8d5d-54e5e3dd721d", "baf67399-8e94-22f8-81b1-900b40b196d7", "55551071-36f1-29c0-8a92-6c77b608161f", "1d234014-e280-2b1a-8eab-fe44989693aa", "bf9a3db4-45a5-2e80-80d9-a1842899ef45", "751a5581-fe61-2c3b-8cac-76cacfb277e0", "b1d87fb4-e72e-2c8c-9ddc-75cf44f9984c", "1713f852-5300-2104-8658-73b77b66af61", "bcb0fe27-4f39-2c70-9dae-5c8625b3553d", "0cac75c6-8d6f-2d13-8d35-f0128b4fb7a9", "531cff00-0021-28f6-8f9c-d0fe2a031f9e", "cf00577e-b8f0-2aa8-8751-be34df30341f", "fcf66db8-622d-291c-8502-863b391b9ef1", "751a558a-fe61-2c3b-8f24-9c288e5fca94", "77941466-cfdf-29cb-865f-0e2fcea3af87", "38770c99-86d7-27b8-85c7-2a09ba38e105", "531cff02-0021-28f6-8deb-a71f6c628346", "f2c76fe5-2239-29d0-8593-1a2555125595", "b1f23303-d255-2761-9622-61de85559f99", "0cac7664-8d6f-2d13-8e1d-b97c9ab20b12", "0ad2d3a3-79e2-2212-9a51-9094be707ec2", "3b7b33af-1b11-283e-9abd-170c6329c0e6", "77941462-cfdf-29cb-85a6-eb23498f9206", "5630cfd5-12bf-2860-857e-914ddf3d09f5", "baf673ae-8e94-22f8-82be-ce30b52711a4", "baf0a8e8-26d4-2033-8bbb-975c6002fbb7", "f3d7fa58-2835-2805-83bc-d2c583045bb4", "c2d9933f-1947-2fbf-807f-c44bc1aed269", "c12890de-d3df-2d0d-85bf-b248e7c3431d", "8eabc41a-5af7-2f32-8677-c1e3f9b04e62", "531cff08-0021-28f6-8e08-ba2eeb945e09", "787ed580-9d98-2c97-8167-6d3b445da2c0", "6e67e55f-1209-2cd0-8194-8c6278434c80", "752cc5a1-920c-26f5-8e94-4711ff3be870", "c7895f74-339c-2d13-8030-f7f2f8504589", "2e36954f-e133-204c-9320-34d52370eb4d", "6bde60be-9162-246f-8e66-adb6a3511e6e", "55551097-36f1-29c0-88fe-2fb72427887a", "569d8f13-72aa-2f24-8b64-3bde3b0603ab", "2e369549-e133-204c-91af-a19767a23bf2", "41385851-a238-2435-8056-b7d662a97c93", "0cac7642-8d6f-2d13-8f9f-eb19016049fa", "2451c041-fae8-24f6-9213-b8b6af8d86c1", "0f2f2719-b736-2f71-8e3e-46490f5fe5fe", "c7895f0f-339c-2d13-807d-cb2e7fe646fd", "43b8cae1-6678-2e38-9865-c19c07c25015", "0cac75b3-8d6f-2d13-8f4b-3d74070429df", "4e858c95-fd93-2cb4-853e-1233ae90402a", "09582242-e2c2-2de1-942f-d1001cbff56b", "c12890cf-d3df-2d0d-876d-f774cb9d9861", "43b8caf3-6678-2e38-99da-2aeb70cf8e95", "20c9939d-698f-29c5-85c6-3c618e00061f", "6bde6041-9162-246f-8e57-11444a314136", "0cac75b1-8d6f-2d13-8c17-9099db8915bc", "8eabc418-5af7-2f32-85a1-a2709b29c46d", "c7895f23-339c-2d13-818e-2375ea0ba6d6", "d7d40d3e-7a5d-2b36-97b3-dd11abc0e876", "a644cb97-0ee5-2f66-9cc7-3ecaa29c19df", "10b1795d-3938-2467-887f-fedbef4df869", "4d3d82ae-8cf4-2e04-80de-20f96c814d9c", "1d233fec-e280-2b1a-8e12-986a9af880f0", "f62fd5ff-9a3f-2f44-894c-462b3997d695", "55551099-36f1-29c0-88f9-114729f2705a", "4e858c9f-fd93-2cb4-8427-8d729dcc80ae", "422885ad-192d-25fc-8631-c3a978a9d3d4", "f38169cf-378c-2a65-855f-05d491a3f26e", "2e36953d-e133-204c-9045-d52ab9f09dcb", "aa20278e-8bc8-2c5b-9cfe-01db71781ed4", "1776ad88-4db7-2333-887f-8be533122e06", "ab835f94-54c6-29a1-9872-210a98f12c12", "bf9a3deb-45a5-2e80-8291-f0039d671ea1", "ea31825e-0a4c-2749-91f9-1cc45973a0f6", "1d234002-e280-2b1a-8c8d-6aafb5b35a24", "baf6739d-8e94-22f8-8147-feefe48f828b", "c7895f4e-339c-2d13-80d3-90eaa1403a25", "d7d40d75-7a5d-2b36-9746-3e807d3e7558", "bcb0fe04-4f39-2c70-9f03-d0eec592de24", "b05fdd58-fca0-2d4f-8bd7-e80abe1ddb4c", "7272e1a0-a01b-20f6-8b34-1fdc43d1f911", "a38a57d8-8a91-2e91-9237-0f8c220feef5", "20c993b5-698f-29c5-85a5-12b8deae78fb", "6bde6065-9162-246f-8d78-827bb6cbd6fe", "0cac7564-8d6f-2d13-8cb2-8b01c0a1b3d5", "a0905ff4-66f7-2272-9e2c-823789eb613a", "5630cfde-12bf-2860-8563-d68bdd98fab0", "5341b7e1-8a66-2cdd-87a3-02aad508ff86", "ddc737b1-765b-241a-9c21-a4e2674b2853", "baf0a8f8-26d4-2033-8af4-9d0603924ce1", 
"d9725be3-7513-2d91-853f-22df440e48a3", "6bde60ea-9162-246f-8e87-899570bd80e6", "bf9a3da2-45a5-2e80-8219-1f0a216399fe", "8eabc445-5af7-2f32-85ae-90deb8eb1b0b", "fcf66d7d-622d-291c-8542-f708b096b45f", "1d234016-e280-2b1a-8c7c-ce441e1ebb0f", "a0905fdf-66f7-2272-9c4c-e2390af0546d", "b1cf9968-9fdd-2189-97dd-d5f0d48bf70a", "scene0029_00", "1mp3d_0033_region17", "095821f9-e2c2-2de1-9707-8f735cd1c148", "scene0072_01", "185d7419-3698-223c-8b18-2dccc08c66d9", "1mp3d_0038_region21", "scene0098_00", "1mp3d_0026_region7", "6a360549-fa53-2915-97ad-3853a1cb460c", "4acaebce-6c10-2a2a-852f-98c6902bcc88", "1mp3d_0044_region39", "scene0020_01", "1mp3d_0041_region32", "scene0686_02", "1mp3d_0016_region0", "ad408cab-84db-2095-89c7-4a7d5aead447", "scene0484_00", "1mp3d_0006_region15", "scene0645_00", "scene0256_01", "scene0617_00", "scene0590_00", "scene0362_03", "scene0543_01", "scene0573_00", "scene0478_00", "scene0310_01", "scene0212_00", "scene0339_00", "scene0074_01", "scene0279_02", "scene0596_00", "scene0195_00", "scene0423_01", "scene0052_01", "scene0471_01", "scene0326_00", "scene0260_00", "scene0492_01", "scene0705_02", "scene0169_00", "scene0447_00", "scene0391_00", "scene0477_01", "scene0081_02", "scene0511_01", "scene0637_00", "scene0690_01", "scene0170_00", "scene0193_00", "scene0319_00", "scene0673_01", "scene0630_04", "scene0081_00", "scene0344_01", "scene0006_01", "scene0611_00", "scene0232_00", "scene0364_01", "scene0062_02", "scene0009_02", "scene0586_01", "scene0146_01", "scene0508_01", "scene0501_00", "scene0260_02", "scene0232_02", "scene0517_01", "scene0553_00", "scene0501_02", "scene0295_01", "scene0269_01", "scene0042_00", "scene0112_01", "scene0204_01", "scene0195_02", "scene0246_00", "scene0170_02", "scene0528_01", "scene0435_02", "scene0545_01", "scene0575_00", "scene0388_00", "scene0705_00", "scene0387_01", "scene0016_02", "scene0352_00", "scene0608_02", "scene0087_02", "scene0596_02", "scene0527_00", "scene0403_01", "scene0309_01", "scene0009_00", "scene0415_00", "scene0276_01", "scene0608_00", "scene0680_00", "scene0150_02", "scene0413_00", "scene0397_00", "scene0039_01", "scene0631_00", "scene0630_06", "scene0222_01", "scene0000_01", "scene0234_00", "scene0122_00", "scene0030_02", "scene0036_00", "scene0703_00", "scene0306_00", "scene0467_00", "scene0019_01", "scene0279_00", "scene0283_00", "scene0518_00", "scene0451_01", "scene0044_02", "scene0114_01", "scene0468_01", "scene0521_00", "scene0270_01", "scene0482_00", "scene0589_00", "scene0441_00", "scene0447_02", "scene0415_02", "scene0212_02", "scene0451_03", "scene0381_01", "scene0580_01", "scene0266_00", "scene0458_00", "scene0425_01", "scene0575_02", "scene0016_00", "scene0699_00", "scene0044_00", "scene0653_01", "scene0655_01", "scene0627_01", "scene0336_01", "scene0538_00", "scene0160_03", "scene0398_01", "scene0665_00", "scene0088_03", "scene0457_01", "scene0300_00", "scene0156_00", "scene0134_01", "scene0362_01", "scene0010_00", "scene0448_01", "scene0696_01", "scene0433_00", "scene0102_00", "scene0214_00", "scene0354_00", "scene0631_02", "scene0555_00", "scene0675_01", "scene0088_01", "scene0285_00", "scene0601_01", "scene0663_02", "scene0214_02", "scene0140_01", "scene0104_00", "scene0553_02", "scene0160_01", "scene0663_00", "scene0250_01", "scene0150_00", "scene0589_02", "scene0132_01", "scene0372_00", "scene0124_00", "scene0030_00", "scene0686_00", "scene0320_00", "scene0507_00", "scene0087_00", "scene0166_01", "scene0599_01", "scene0374_00", "scene0259_00", "scene0062_00", "scene0607_01", "scene0042_02", 
"scene0176_00", "scene0461_00", "scene0064_00", "scene0293_01", "scene0240_00", "scene0320_02", "scene0352_02", "scene0435_00", "scene0029_02", "scene0149_00", "scene0628_00", "scene0645_02", "scene0643_00", "scene0329_01", "scene0673_03", "scene0628_02", "1mp3d_0048_region10", "1mp3d_0065_region18", "1mp3d_0033_region15", "1mp3d_0002_region8", "1mp3d_0066_region10", "1mp3d_0003_region11", "1mp3d_0013_region28", "1mp3d_0076_region1", "1mp3d_0005_region12", "1mp3d_0036_region11", "1mp3d_0043_region10", "1mp3d_0064_region2", "1mp3d_0003_region13", "1mp3d_0023_region0", "1mp3d_0060_region13", "1mp3d_0010_region9", "1mp3d_0048_region7", "1mp3d_0043_region12", "1mp3d_0018_region1", "1mp3d_0003_region28", "1mp3d_0006_region18", "1mp3d_0016_region18", "1mp3d_0026_region8", "1mp3d_0035_region16", "1mp3d_0025_region14", "1mp3d_0075_region2", "1mp3d_0069_region44", "1mp3d_0005_region29", "1mp3d_0045_region11", "1mp3d_0013_region27", "1mp3d_0057_region4", "1mp3d_0033_region23", "1mp3d_0057_region9", "1mp3d_0023_region2", "1mp3d_0052_region1", "1mp3d_0073_region14", "1mp3d_0004_region1", "1mp3d_0064_region0", "1mp3d_0039_region73", "1mp3d_0065_region15", "1mp3d_0032_region2", "1mp3d_0018_region11", "1mp3d_0062_region4", "1mp3d_0035_region14", "1mp3d_0065_region21", "1mp3d_0035_region20", "1mp3d_0039_region32", "1mp3d_0043_region3", "1mp3d_0001_region46", "1mp3d_0076_region12", "1mp3d_0069_region0", "1mp3d_0013_region8", "1mp3d_0010_region16", "1mp3d_0061_region8", "1mp3d_0025_region4", "1mp3d_0001_region4", "1mp3d_0059_region5", "1mp3d_0001_region31", "1mp3d_0012_region41", "1mp3d_0012_region39", "1mp3d_0039_region71", "1mp3d_0084_region9", "1mp3d_0005_region26", "1mp3d_0040_region23", "1mp3d_0016_region17", "1mp3d_0003_region27", "1mp3d_0003_region25", "1mp3d_0039_region30", "1mp3d_0007_region2", "1mp3d_0002_region5", "1mp3d_0078_region2", "1mp3d_0076_region3", "1mp3d_0005_region10", "1mp3d_0038_region18", "1mp3d_0017_region30", "1mp3d_0001_region44", "1mp3d_0010_region14", "1mp3d_0035_region19", "1mp3d_0013_region25", "1mp3d_0013_region11", "1mp3d_0009_region3", "1mp3d_0050_region18", "1mp3d_0040_region60", "1mp3d_0000_region14", "1mp3d_0013_region7", "1mp3d_0007_region0", "1mp3d_0039_region6", "1mp3d_0022_region30", "1mp3d_0016_region15", "1mp3d_0038_region17", "1mp3d_0043_region1", "1mp3d_0000_region16", "1mp3d_0038_region23", "1mp3d_0001_region49", "1mp3d_0013_region5", "1mp3d_0025_region6", "1mp3d_0018_region13", "1mp3d_0067_region3", "1mp3d_0065_region23", "1mp3d_0052_region3", "1mp3d_0039_region47", "1mp3d_0035_region22", "1mp3d_0023_region17", "1mp3d_0073_region4", "1mp3d_0012_region36", "1mp3d_0061_region7", "1mp3d_0069_region2", "1mp3d_0040_region17", "1mp3d_0044_region36", "1mp3d_0048_region5", "1mp3d_0078_region14", "1mp3d_0023_region15", "1mp3d_0026_region27", "1mp3d_0046_region9", "1mp3d_0039_region48", "1mp3d_0010_region6", "1mp3d_0040_region56", "1mp3d_0076_region10", "1mp3d_0030_region10", "1mp3d_0048_region12", "1mp3d_0040_region59", "1mp3d_0009_region1", "1mp3d_0084_region6", "1mp3d_0066_region26", "1mp3d_0050_region17", "1mp3d_0013_region13", "1mp3d_0033_region18", "1mp3d_0046_region6", "1mp3d_0061_region5", "1mp3d_0046_region4", "1mp3d_0039_region45", "1mp3d_0082_region2", "1mp3d_0016_region2", "1mp3d_0040_region18", "1mp3d_0073_region19", "1mp3d_0057_region6", "1mp3d_0044_region43", "1mp3d_0040_region21", "1mp3d_0026_region28", "1mp3d_0036_region25", "1mp3d_0062_region6", "1mp3d_0067_region1", "1mp3d_0005_region24", "1mp3d_0026_region25", "1mp3d_0040_region62", 
"1mp3d_0025_region9", "1mp3d_0045_region13", "1mp3d_0082_region0", "1mp3d_0029_region30", "1mp3d_0069_region31", "1mp3d_0026_region13", "1mp3d_0018_region3", "1mp3d_0032_region0", "1mp3d_0026_region11", "1mp3d_0045_region8", "1mp3d_0073_region16", "1mp3d_0069_region46", "1mp3d_0008_region13", "1mp3d_0004_region3", "1mp3d_0033_region21", "1mp3d_0045_region5", "1mp3d_0044_region41", "1mp3d_0026_region5", "1mp3d_0039_region9", "1mp3d_0001_region9", "1mp3d_0078_region0", "1mp3d_0084_region4", "1mp3d_0081_region1", "1mp3d_0030_region12", "1mp3d_0002_region7", "1mp3d_0059_region7", "1mp3d_0066_region12", "1mp3d_0039_region4", "1mp3d_0050_region15", "1mp3d_0081_region3", "1mp3d_0060_region11", "1mp3d_0006_region17", "1mp3d_0010_region4", "1mp3d_0040_region54", "1mp3d_0001_region33", "1mp3d_0001_region6", "1mp3d_0040_region15", "1mp3d_0044_region34", "1mp3d_0062_region9", "1mp3d_0012_region43", "1mp3d_0059_region8", "1mp3d_0048_region8", "1mp3d_0073_region9", "1mp3d_0066_region24", "1mp3d_0066_region29", "1mp3d_0022_region32", "1mp3d_0036_region13", "1mp3d_0029_region32", "1mp3d_0043_region24", "1mp3d_0069_region33", "1mp3d_0038_region15", "1mp3d_0075_region0", "1mp3d_0040_region2", "1mp3d_0065_region17", "1mp3d_0043_region26", "1mp3d_0045_region7", "1mp3d_0073_region6", "1mp3d_0017_region32", "0ad2d386-79e2-2212-9b40-43d081db442a", "1c211546-f201-2d25-862b-6323518ec6e1", "8eabc44d-5af7-2f32-8529-ab5255a6ae7f", "6bde605d-9162-246f-8c75-fb2f7781bc40", "c12890dc-d3df-2d0d-87b5-90e9596c3de4", "0cac7646-8d6f-2d13-8d24-1bc724028d31", "4138584f-a238-2435-8242-4c9b86032127", "1d234024-e280-2b1a-8c28-2743fefed020", "baf6738b-8e94-22f8-8085-24c4e5ef5220", "ba6fda96-a4c1-2dca-8248-86e771ef875a", "0cac75d6-8d6f-2d13-8dd7-26f14b02c0e7", "09582214-e2c2-2de1-956a-64d8da4ba7cc", "a38a57da-8a91-2e91-93eb-0b7f675da4ba", "751a558c-fe61-2c3b-8f4e-340ddb43b8bd", "09582256-e2c2-2de1-9662-c4bc7ca7c497", "09582250-e2c2-2de1-9541-3efcbdc2dca4", "20c993c5-698f-29c5-8604-3248ede4091f", "8f0f144b-55de-28ce-8053-2828b87a0cc9", "b05fdd96-fca0-2d4f-88c3-d9dfda85c00e", "0cac759d-8d6f-2d13-8e8b-c3730d9fe563", "4a9a43e2-7736-2874-841a-f239715be4c6", "ddc737ad-765b-241a-9d9f-1f19fab18318", "1d233fe6-e280-2b1a-8caf-eb0d13a59ad6", "1c211556-f201-2d25-87f8-b0ac6ffb74fe", "8eabc435-5af7-2f32-85c3-163c1fa6e280", "1d234004-e280-2b1a-8ec8-560046b9fc96", "752cc5a3-920c-26f5-8ff3-49518eff94c6", "18d4d922-7eb5-280e-87bb-af3229fb1f13", "c92fb576-f771-2064-845a-a52a44a9539f", "20c9939b-698f-29c5-85a4-68c286bd7053", "68bae762-3567-2f7c-80f8-2d44a8d24782", "0f2f271b-b736-2f71-8f3a-e9265463abcb", "c8b34d02-5ea0-2d69-9179-be3dfe72d97b", "4fbad31a-465b-2a5d-8566-f4e4845c1a78", "cdcaf5b9-ddd8-2ed6-9407-e5600914b733", "0cac75fe-8d6f-2d13-8dd8-87c100e95f4d", "41385827-a238-2435-8320-d8d3eb507f5e", "6bde6079-9162-246f-8fad-d8f2358bd413", "6a36051f-fa53-2915-9497-19bbbf3df0c4", "68bae770-3567-2f7c-80b4-0ff21a8fee10", "7ab2a9c9-ebc6-2056-89b6-c78dbb4290d3", "8eabc44b-5af7-2f32-8553-18fd693ab49f", "8eabc45a-5af7-2f32-85ed-572ae21920df", "1c21154a-f201-2d25-8573-d3e22ddfe900", "d7d40d39-7a5d-2b36-972e-a7c800ca7bbf", "2e369529-e133-204c-91f5-6ab463d511c1", "5630cfe7-12bf-2860-8710-52729dc36cc6", "d7d40d6c-7a5d-2b36-9674-c435fa58a2d1", "4e858c89-fd93-2cb4-8459-7542184fd2ad", "0cac755a-8d6f-2d13-8fed-b1be02f4ef77", "c7895f07-339c-2d13-8176-7418b6e8d7ce", "38770c9d-86d7-27b8-869e-4f713b04f290", "7747a521-9431-24e8-84e7-7e01eaee08c4", "4fbad32b-465b-2a5d-8499-85100e88f454", "bcb0fe10-4f39-2c70-9c4e-602f1186d2e9", "0cac7623-8d6f-2d13-8da0-462e7fc0bea2", 
"ddc737b5-765b-241a-9efa-a3356768562d", "bf9a3db6-45a5-2e80-825a-e599db74ba26", "c12890d5-d3df-2d0d-86d3-b8dac6bea6a9", "185d741f-3698-223c-8a4c-5e77f82f9251", "6bde6051-9162-246f-8fa1-c88a8a60a97e", "422885e7-192d-25fc-8767-177a507a913c", "2e36952b-e133-204c-911e-7644cb34e8b2", "5555106a-36f1-29c0-8913-df1ba3c3cfd5", "7272e198-a01b-20f6-8a8b-6688d9ac044e", "43b8cae5-6678-2e38-98db-a2d49b1ecdfb", "531cff10-0021-28f6-8f94-80db8fdbbbee", "def7fbb9-48c2-2895-9249-16ce01ca2e59", "68bae75f-3567-2f7c-829d-7422117729f3", "751a55a1-fe61-2c3b-8df5-925bfeac2496", "c670793a-2ecb-2de2-8276-7d00f11b8f69", "6bde6083-9162-246f-8c9c-e170212059b2", "0cac7536-8d6f-2d13-8dc2-2f9d7aa62dc4", "68bae76e-3567-2f7c-82bd-a09641695364", "6bde609d-9162-246f-8e6e-a3f462f77042", "6bde6045-9162-246f-8dff-b27ff180a6ae", "e61b0e04-bada-2f31-82d6-72831a602ba7", "0cac767c-8d6f-2d13-8e22-50ba98af68e6", "2e369527-e133-204c-91cc-bb874b8fd4ae", "4238490a-60a7-271e-9c04-3846221dc354", "95be45d7-a558-22da-9c39-ea7e57c68be5", "8f0f1447-55de-28ce-83c5-092887498eea", "2e369523-e133-204c-91f9-6f3deaa8e11e", "bcb0fe2b-4f39-2c70-9d6b-5e92d634ac35", "5341b7ab-8a66-2cdd-865a-c27c4263d24a", "6bde6081-9162-246f-8c4e-ffaf709d17b1", "185d7421-3698-223c-8b02-a4befeb2dab9", "9c27de4f-6184-2cda-81d6-9c806607918e", "8eabc455-5af7-2f32-8606-a0bdbe6c537d", "569d8f05-72aa-2f24-899c-58b243c8189d", "0f2f2713-b736-2f71-8c10-dcecbef587f9", "0cac7613-8d6f-2d13-8d92-487a50d40794", "7747a50a-9431-24e8-85c6-3488c128ac26", "20c993b3-698f-29c5-859c-dca8ddecf220", "0a4b8ef6-a83a-21f2-8672-dce34dd0d7ca", "ea318260-0a4c-2749-9389-4c16c782c4b1", "b05fdd92-fca0-2d4f-89dd-723db1a8d78c", "09582252-e2c2-2de1-96bf-d41dd0362f39", "6a360547-fa53-2915-9606-a90f1c737290", "0958221b-e2c2-2de1-96b1-6233099811a0", "c7895f0d-339c-2d13-837c-6e5d8b7be7a8", "20c993a3-698f-29c5-86ee-4e979740ea56", "47319768-f9f7-2a1a-97ba-25a4f441e010", "e44d238c-52a2-2879-89d9-a29ba04436e0", "09582212-e2c2-2de1-9700-fa44b14fbded", "ddc737a3-765b-241a-9c99-f98a3c126686", "8e0f1c53-9e28-2339-8588-2b24d9e9f3e8", "fcf66d94-622d-291c-85fc-284b68d64c81", "0cac75c4-8d6f-2d13-8c37-fcfaf141ae5a", "6bde60ba-9162-246f-8eaf-b462f3cf6234", "0cac767e-8d6f-2d13-8d1a-d5e9b184df15", "f6f1d5ae-a401-23c0-89ad-8ddea9ae5a6c", "1776ad78-4db7-2333-887f-d6b6a617255a", "0958224a-e2c2-2de1-950b-a53ea2cb660d", "185d7423-3698-223c-8879-80ad9339afce", "185d7417-3698-223c-8a26-b3a5fce593d4", "0cac768c-8d6f-2d13-8cc8-7ace156fc3e7", "10b17971-3938-2467-8a86-2b42613aa7ec", "c6707944-2ecb-2de2-8369-bd291653f5c3", "56d957e9-0184-2301-8c3e-ff1c86c6b50c", "38770ca1-86d7-27b8-8619-ab66f67d9adf", "4e858c9d-fd93-2cb4-859e-c508af1e5449", "185d7429-3698-223c-8a42-8c2d18d51531", "bcb0fe0e-4f39-2c70-9e02-22b2f980cc35", "0cac761d-8d6f-2d13-8f35-2364ee20f2a9", "dc42b37a-8d5c-2d2a-84dd-83aff10e0abe", "6bde6059-9162-246f-8f04-3dc6f8624ad2", "fcf66dbe-622d-291c-8790-49f1fe8a02e5", "4fbad32f-465b-2a5d-8408-146ab1d72808", "0ad2d389-79e2-2212-9b0a-5f0e6f4982b5", "ab835f98-54c6-29a1-9b55-b33723227477", "bcb0fe29-4f39-2c70-9f18-79507a4e9a30", "4238490c-60a7-271e-9f38-3c651e3b3912", "0cac754d-8d6f-2d13-8c81-d134a19b0045", "8eabc46b-5af7-2f32-84ce-53d68eca91fb", "74ef8474-9dce-2d66-82e5-309429937089", "6bde60d8-9162-246f-8f11-834bec23f91e", "ba6fdaa6-a4c1-2dca-8137-f7b371e665f4", "bcb0fe2f-4f39-2c70-9eaf-a074d1b3e47b", "1mp3d_0069_region23", "scene0532_00", "scene0406_02", "scene0323_01", "1mp3d_0032_region19", "1mp3d_0005_region0", "0cac761f-8d6f-2d13-8c29-90f19c5f65c6", "scene0061_01", "0ad2d384-79e2-2212-9b18-72b44eb5463f", "scene0604_02", 
"c2d9933d-1947-2fbf-81fa-c8a7f9625eea", "scene0439_00", "e44d238f-52a2-2879-88ec-e383a4d9abf3", "c7895f8a-339c-2d13-82a6-bc530140d95d", "0cac755e-8d6f-2d13-8c6a-c0979ca34a4f", "scene0163_00", "scene0296_00", "scene0111_02", "scene0051_02", "scene0041_01", "scene0071_00", "scene0077_00", "scene0358_00", "scene0003_00", "scene0382_00", "scene0315_00", "scene0534_00", "scene0131_02", "scene0273_00", "scene0378_00", "scene0003_02", "scene0430_01", "scene0117_00", "scene0392_01", "scene0410_01", "scene0048_00", "scene0351_01", "scene0137_00", "scene0265_01", "scene0583_00", "scene0614_01", "scene0693_02", "scene0444_01", "scene0377_01", "scene0695_02", "scene0013_01", "scene0118_01", "scene0514_00", "scene0371_01", "scene0540_00", "scene0583_02", "scene0488_00", "scene0367_00", "scene0290_00", "scene0481_01", "scene0221_00", "scene0092_02", "scene0186_00", "scene0143_02", "scene0593_01", "scene0669_00", "scene0579_00", "scene0491_00", "scene0275_00", "scene0094_00", "scene0624_00", "scene0211_03", "scene0685_01", "scene0604_00", "scene0426_00", "scene0502_01", "scene0347_00", "scene0347_02", "scene0540_02", "scene0512_00", "scene0416_03", "scene0305_01", "scene0286_03", "scene0579_02", "scene0426_02", "scene0569_01", "scene0474_00", "scene0695_00", "scene0067_01", "scene0121_01", "scene0420_00", "scene0659_01", "scene0559_02", "scene0416_01", "scene0137_02", "scene0524_01", "scene0227_00", "scene0452_00", "scene0570_01", "scene0255_02", "scene0303_01", "scene0546_00", "scene0487_01", "scene0111_00", "scene0005_00", "scene0693_00", "scene0361_00", "scene0051_00", "scene0092_00", "scene0025_02", "scene0286_01", "scene0640_01", "scene0211_01", "scene0108_00", "scene0101_03", "scene0452_02", "scene0419_00", "scene0201_00", "scene0201_02", "scene0556_01", "scene0325_01", "scene0263_01", "scene0084_01", "scene0656_02", "scene0420_02", "scene0649_00", "scene0692_04", "scene0335_00", "scene0153_01", "scene0289_00", "scene0348_01", "scene0078_01", "scene0358_02", "scene0199_00", "scene0341_00", "scene0299_01", "scene0127_01", "scene0419_02", "scene0472_02", "scene0335_02", "scene0670_00", "scene0384_00", "scene0666_01", "scene0368_01", "scene0207_00", "scene0173_01", "scene0406_00", "scene0700_01", "scene0255_00", "scene0280_01", "scene0472_00", "scene0155_01", "scene0679_01", "scene0559_00", "scene0378_02", "scene0602_00", "scene0057_00", "scene0566_00", "scene0474_02", "scene0058_01", "scene0361_02", "scene0165_00", "scene0253_00", "scene0650_00", "scene0237_01", "scene0357_01", "scene0676_00", "scene0023_00", "scene0218_00", "scene0585_00", "scene0165_02", "scene0454_00", "scene0143_00", "scene0207_02", "scene0313_02", "scene0576_01", "scene0622_00", "scene0238_00", "scene0313_00", "scene0333_00", "scene0612_01", "scene0068_00", "scene0646_01", "scene0131_00", "scene0231_01", "scene0101_01", "scene0549_01", "scene0025_00", "scene0498_01", "scene0656_00", "scene0560_00", "scene0394_01", "scene0400_00", "scene0180_00", "scene0497_00", "scene0128_00", "scene0035_01", "scene0409_01", "scene0145_00", "1mp3d_0061_region26", "1mp3d_0060_region9", "1mp3d_0017_region22", "1mp3d_0044_region9", "1mp3d_0047_region21", "1mp3d_0027_region9", "1mp3d_0005_region2", "1mp3d_0012_region4", "1mp3d_0001_region54", "1mp3d_0064_region19", "1mp3d_0035_region47", "1mp3d_0069_region21", "1mp3d_0069_region17", "1mp3d_0012_region29", "1mp3d_0044_region6", "1mp3d_0071_region12", "1mp3d_0069_region18", "1mp3d_0067_region28", "1mp3d_0041_region14", "1mp3d_0039_region20", "1mp3d_0017_region14", "1mp3d_0036_region9", 
"1mp3d_0064_region16", "1mp3d_0050_region3", "1mp3d_0072_region5", "1mp3d_0044_region10", "1mp3d_0062_region15", "1mp3d_0012_region12", "1mp3d_0035_region48", "1mp3d_0011_region8", "1mp3d_0042_region28", "1mp3d_0027_region4", "1mp3d_0012_region26", "1mp3d_0050_region1", "1mp3d_0027_region12", "1mp3d_0044_region12", "1mp3d_0029_region16", "1mp3d_0067_region11", "1mp3d_0039_region57", "1mp3d_0047_region8", "1mp3d_0033_region31", "1mp3d_0002_region24", "1mp3d_0003_region6", "1mp3d_0041_region3", "1mp3d_0017_region3", "1mp3d_0001_region15", "1mp3d_0005_region36", "1mp3d_0074_region3", "1mp3d_0060_region4", "1mp3d_0044_region26", "1mp3d_0033_region33", "1mp3d_0014_region0", "1mp3d_0044_region24", "1mp3d_0014_region27", "1mp3d_0012_region24", "1mp3d_0019_region2", "1mp3d_0044_region29", "1mp3d_0011_region15", "1mp3d_0066_region2", "1mp3d_0001_region56", "1mp3d_0003_region4", "1mp3d_0066_region0", "1mp3d_0035_region5", "1mp3d_0012_region10", "1mp3d_0039_region63", "1mp3d_0038_region5", "1mp3d_0032_region20", "1mp3d_0032_region22", "1mp3d_0071_region9", "1mp3d_0059_region11", "1mp3d_0071_region4", "1mp3d_0062_region18", "1mp3d_0019_region12", "1mp3d_0041_region16", "1mp3d_0006_region3", "1mp3d_0029_region8", "1mp3d_0047_region7", "1mp3d_0057_region15", "1mp3d_0014_region13", "1mp3d_0011_region17", "1mp3d_0077_region11", "1mp3d_0067_region25", "1mp3d_0027_region10", "1mp3d_0035_region32", "1mp3d_0041_region1", "1mp3d_0022_region22", "1mp3d_0079_region3", "1mp3d_0011_region21", "1mp3d_0027_region24", "1mp3d_0041_region22", "1mp3d_0077_region13", "1mp3d_0022_region20", "1mp3d_0040_region46", "1mp3d_0004_region11", "1mp3d_0012_region9", "1mp3d_0033_region3", "1mp3d_0014_region25", "1mp3d_0038_region33", "1mp3d_0057_region21", "1mp3d_0006_region1", "1mp3d_0032_region16", "1mp3d_0062_region17", "1mp3d_0042_region0", "1mp3d_0035_region8", "1mp3d_0002_region26", "1mp3d_0083_region1", "1mp3d_0014_region11", "1mp3d_0077_region0", "1mp3d_0014_region2", "1mp3d_0012_region6", "1mp3d_0030_region0", "1mp3d_0011_region7", "1mp3d_0004_region13", "1mp3d_0017_region1", "1mp3d_0000_region5", "1mp3d_0083_region3", "1mp3d_0067_region27", "1mp3d_0057_region18", "1mp3d_0011_region5", "1mp3d_0074_region1", "1mp3d_0009_region10", "1mp3d_0029_region7", "1mp3d_0029_region14", "1mp3d_0029_region20", "1mp3d_0065_region1", "1mp3d_0040_region33", "1mp3d_0027_region26", "1mp3d_0041_region19", "1mp3d_0038_region7", "1mp3d_0029_region5", "1mp3d_0061_region24", "1mp3d_0047_region5", "1mp3d_0013_region40", "1mp3d_0022_region1", "1mp3d_0002_region12", "1mp3d_0011_region18", "1mp3d_0014_region28", "1mp3d_0001_region21", "1mp3d_0040_region49", "1mp3d_0033_region1", "1mp3d_0064_region14", "1mp3d_0071_region6", "1mp3d_0035_region7", "1mp3d_0072_region7", "1mp3d_0038_region8", "1mp3d_0041_region20", "1mp3d_0059_region13", "1mp3d_0003_region9", "1mp3d_0024_region5", "1mp3d_0047_region15", "1mp3d_0042_region2", "1mp3d_0009_region12", "1mp3d_0062_region23", "1mp3d_0072_region8", "1mp3d_0022_region16", "1mp3d_0030_region2", "1mp3d_0079_region1", "1mp3d_0022_region14", "1mp3d_0000_region8", "1mp3d_0008_region2", "1mp3d_0085_region7", "1mp3d_0039_region22", "1mp3d_0017_region19", "1mp3d_0029_region22", "1mp3d_0047_region18", "1mp3d_0057_region23", "1mp3d_0029_region19", "1mp3d_0017_region20", "1mp3d_0077_region2", "1mp3d_0005_region34", "1mp3d_0067_region13", "1mp3d_0044_region4", "1mp3d_0013_region42", "1mp3d_0022_region19", "1mp3d_0004_region25", "1mp3d_0042_region13", "1mp3d_0085_region5", "1mp3d_0001_region17", "1mp3d_0002_region10", 
"1mp3d_0042_region11", "1mp3d_0005_region41", "1mp3d_0032_region14", "1mp3d_0039_region58", "1mp3d_0035_region45", "1mp3d_0064_region22", "1mp3d_0062_region21", "1mp3d_0013_region37", "1mp3d_0047_region17", "1mp3d_0074_region14", "1mp3d_0001_region18", "1mp3d_0022_region3", "1mp3d_0042_region27", "1mp3d_0057_region17", "1mp3d_0069_region15", "1mp3d_0039_region19", "1mp3d_0019_region10", "1mp3d_0039_region16", "1mp3d_0035_region30", "1mp3d_0008_region0", "1mp3d_0071_region10", "1mp3d_0065_region3", "1mp3d_0005_region39", "1mp3d_0061_region10", "1mp3d_0017_region16", "1mp3d_0027_region6", "1mp3d_0047_region23", "1mp3d_0001_region23", "1mp3d_0039_region14", "1mp3d_0036_region6", "1mp3d_0019_region0", "1mp3d_0013_region38", "1mp3d_0040_region31", "1mp3d_0039_region61", "1mp3d_0036_region4", "1mp3d_0038_region31", "1mp3d_0000_region7", "1mp3d_0040_region44", "1mp3d_0064_region20", "1mp3d_0061_region12", "1mp3d_0060_region6", "1mp3d_0039_region55", "1mp3d_0013_region35", "1mp3d_0042_region25", "bf9a3db8-45a5-2e80-8112-d2c2429fdb79", "b8837e32-57ec-29c6-893c-77fce535b159", "1d233ffc-e280-2b1a-8c3a-af74ca2b0cea", "77361fce-d054-2a22-8bb2-a870ab051854", "751a55a3-fe61-2c3b-8d1b-daad80d1af30", "b1f23308-d255-2761-94da-981d962c6bf8", "422885e0-192d-25fc-844a-62e395291839", "a38a57d6-8a91-2e91-9090-e2625e960187", "c12890d8-d3df-2d0d-87cc-da303a47b893", "20c993af-698f-29c5-84b2-972451f94cfb", "f2c76fed-2239-29d0-8598-9ed42cec9dc5", "5341b7e3-8a66-2cdd-8709-66a2159f0017", "8f0f1467-55de-28ce-8331-b670a7274af9", "6ed38504-7db9-2d45-81b6-b57de59a603b", "baf0a8f4-26d4-2033-896f-27e7fa6a3c6d", "fcf66d7b-622d-291c-86b8-7db96aebcee3", "d7d40d73-7a5d-2b36-9709-0be87127a141", "bf9a3dd5-45a5-2e80-8306-ccbca5eea15a", "09582254-e2c2-2de1-9434-162187eb819e", "75c25989-9ca2-2844-97b4-31b81f7554b8", "0cac75b7-8d6f-2d13-8cb2-0b4e06913140", "a38a57dc-8a91-2e91-937b-fac2c2e4949f", "0cf75f4e-564d-23f5-8a9a-3ceecab0afe1", "6bde60c4-9162-246f-8c1e-b0c518544104", "0cac7554-8d6f-2d13-8f1f-5f6fcf08f931", "aa20278a-8bc8-2c5b-9c90-fec1272e21f7", "fcf66dae-622d-291c-862d-dc03f6f1d562", "1d2f8512-d757-207c-8c76-9ce179890827", "752cc585-920c-26f5-8e40-9e37e31cc861", "6bde60dc-9162-246f-8c8c-9403300d2636", "5341b7b5-8a66-2cdd-841d-805bfa2c44b5", "6a360545-fa53-2915-9484-a3de8990a423", "82def5dc-25ba-20f9-8ba3-3bb706799abd", "2451c048-fae8-24f6-9043-f1604dbada2c", "bf9a3def-45a5-2e80-83f8-65c351d6169b", "d7d40d79-7a5d-2b36-9740-15f8e687b25c", "4731977e-f9f7-2a1a-955d-8cdd12ec7337", "0958222a-e2c2-2de1-9474-35e601b3682a", "0cac753a-8d6f-2d13-8f04-2073b87af440", "7747a523-9431-24e8-86ca-377091c35f6e", "bf9a3d9e-45a5-2e80-83c6-4e427c5586a2", "352e9c3a-69fb-27a7-897c-83e7341b28eb", "0ad2d399-79e2-2212-99cf-7a3512734bd7", "bf9a3daa-45a5-2e80-8162-9bf768132559", "0cac7652-8d6f-2d13-8f0e-a1dd07a8a847", "1c21154e-f201-2d25-8794-7746336ad397", "8eabc405-5af7-2f32-86d2-d757fa7b019d", "ba6fda9e-a4c1-2dca-8381-c08ad16a6170", "0cac75f0-8d6f-2d13-8d86-371deca33bb4", "422885b1-192d-25fc-868c-110216f86479", "10b17938-3938-2467-8a37-21820ba7efc8", "4fbad331-465b-2a5d-8488-852fcda9513c", "09582228-e2c2-2de1-953d-f6f1ee4b3699", "f4f315fe-8408-2255-974c-e485355f9f9d", "6a36052d-fa53-2915-9764-30d81b2cc2b5", "5630cfd1-12bf-2860-86b2-e7a96bc32c19", "1d234018-e280-2b1a-8f22-da2b997865e8", "87e6cf79-9d1a-289f-845c-abe4deb8642f", "6bde6047-9162-246f-8f57-9da22a267db0", "0cac7578-8d6f-2d13-8c2d-bfa7a04f8af3", "41385847-a238-2435-838b-61864922c518", "20c99392-698f-29c5-8439-54bec948ecb1", "ddc737ab-765b-241a-9c5d-cf3cd8838f28", 
"ad408ca5-84db-2095-884e-e84bf0bb16d7", "0cac757e-8d6f-2d13-8f1d-ef6123d53655", "77361fb6-d054-2a22-8bc5-f4f1a2f34cfb", "b8837e3a-57ec-29c6-8b54-d440ca79a11f", "185d7427-3698-223c-8996-74a451349121", "baf0a8f0-26d4-2033-8948-52ae41fdd30f", "c7895f44-339c-2d13-8103-3e9dcc3be375", "a0906000-66f7-2272-9dcf-c57990d8eab4", "b8837e28-57ec-29c6-89cf-e1768353440f", "10b17967-3938-2467-88c5-a299519f9ad7", "751a559f-fe61-2c3b-8cec-258075450954", "355465d6-d29b-29f9-957c-e9ebbb5751ec", "0f2f2723-b736-2f71-8c94-f692cca76661", "fcf66d8a-622d-291c-8429-0e1109c6bb26", "c92fb57c-f771-2064-8536-7d7f40cfdf51", "280d8ebb-6cc6-2788-9153-98959a2da801", "10b17936-3938-2467-88ba-ba5fe2debf14", "0cf75f50-564d-23f5-8a6b-cf1f98afcbce", "eee5b056-ee2d-28f4-9bbe-f7ddf37895a6", "1d234000-e280-2b1a-8d5e-bd2403a8dbfa", "ba6fda9c-a4c1-2dca-8185-456923749a4e", "d7d40d6a-7a5d-2b36-954e-6faa597aea5f", "7272e196-a01b-20f6-880f-dbca4564e4a5", "baf0a8f2-26d4-2033-890f-b1b42adeac06", "6bde60e8-9162-246f-8f82-83326a675ee0", "c7895f40-339c-2d13-82c7-8a2810cdf02e", "ab835f8e-54c6-29a1-9aeb-3973d3d59915", "c7895f88-339c-2d13-80f8-a904e8f2fff0", "170725d2-9f1b-2d87-803b-c3210c0d30ff", "c7895f50-339c-2d13-828c-1d01eab90a00", "d7d40d60-7a5d-2b36-9573-5d5fe8bae949", "ffa41872-6f78-2040-8600-7b90c9a0fa89", "4d3d82b8-8cf4-2e04-803d-3091a545e57a", "cf00577c-b8f0-2aa8-869e-175d8b655d12", "f2c76ff1-2239-29d0-87f5-8a0346584384", "5341b7c1-8a66-2cdd-853b-598d8f2780ce", "1d233fee-e280-2b1a-8e0d-1dc297439839", "43b8cadf-6678-2e38-9920-064144c99406", "c7895f2b-339c-2d13-8248-b0507e050314", "4731976a-f9f7-2a1a-9737-305b709ca37f", "55551083-36f1-29c0-8876-7e0b46d7996d", "6ed38502-7db9-2d45-8219-6521d44b8025", "bf9a3de3-45a5-2e80-82fc-a5d0db3138c6", "ae73fa15-5a60-2398-8646-dd46c46a9a3d", "6bde60c6-9162-246f-8d25-4df09330450c", "73315a2b-185c-2c8a-8772-fe23ddd2f531", "0ad2d3a5-79e2-2212-9a9e-2502a05fa678", "47319774-f9f7-2a1a-9412-d4a1c89c8aa3", "0f2f271d-b736-2f71-8d77-dfc428e30c1e", "20c993b7-698f-29c5-847d-c8cb8a685f5a", "569d8f1e-72aa-2f24-8a3e-837f59c9e1dc", "8eabc465-5af7-2f32-840a-96fb8ac605cf", "dc42b378-8d5c-2d2a-8477-c1f077da4e56", "0cac763b-8d6f-2d13-8c67-de3d97b48d39", "0cac75ca-8d6f-2d13-8dfc-9932bfbb834d", "6bde60ec-9162-246f-8d34-324b01605c9c", "1d23400c-e280-2b1a-8d36-1f20202baaf9", "bf9a3dd7-45a5-2e80-8330-58904c51c1a9", "c92fb570-f771-2064-8700-e44ec1355c49", "a0905ffa-66f7-2272-9ca9-24c7ec00f34f", "c92fb5ad-f771-2064-8720-947242f8d474", "bcb0fe19-4f39-2c70-9cb7-a6993671cc91", "4731976c-f9f7-2a1a-95cc-31c4d1751d0b", "9c27de56-6184-2cda-8196-591957b6387d", "baf673a4-8e94-22f8-83af-b4e879365685", "75c2598b-9ca2-2844-967d-a16c92acf4ea", "02b33e01-be2b-2d54-93fb-4145a709cec5", "2e4a395e-d452-21a0-9e17-3e0b72d47337", "10b17934-3938-2467-8959-93297ad1b22d", "7747a508-9431-24e8-863c-c53906506d8b", "4e858c97-fd93-2cb4-8773-ac1f3171f4d1", "ba6fda98-a4c1-2dca-8230-bce60f5a0f85", "fcf66d9e-622d-291c-84c2-bb23dfe31327", "b3d355dd-7f5b-2091-86d0-b81f777eb9c1", "ddc73797-765b-241a-9e2c-097c5989baf6", "9c27de4b-6184-2cda-8293-fa3e68da0041", "6a360565-fa53-2915-974c-3a8844e1a451", "10b17932-3938-2467-8b89-08efc4dc9924", "10b1793a-3938-2467-8a84-eb8c081481b8", "8eabc437-5af7-2f32-8703-8e6aeb65db26", "c12890cc-d3df-2d0d-85cd-eebc3e1c4b62", "752cc59b-920c-26f5-8cbc-2b014d7b351c", "569d8f01-72aa-2f24-88fe-19788529b4a4", "1d233fe8-e280-2b1a-8fac-c3646a1cd64a", "0cac7680-8d6f-2d13-8e05-f6da6172d239", "1d2f851e-d757-207c-8c3f-db6373d91f11", "ab835f9f-54c6-29a1-9bd1-878814fe22f7", "4a9a43d2-7736-2874-874d-d0fad0570e19", 
"10b1793e-3938-2467-8b92-f56541e7ef9e", "87e6cf6b-9d1a-289f-866c-b90904d9487d", "1d23401c-e280-2b1a-8ecd-1d677f57530e", "c7895f3e-339c-2d13-832a-b3636f113063", "baf67397-8e94-22f8-8066-c32fbe984412", "8e0f1c28-9e28-2339-8584-ff06ff93c341", "1mp3d_0003_region23", "20c99399-698f-29c5-863d-607d5c927cbe", "scene0587_03", "c92fb580-f771-2064-866d-8aeb53e8360d", "scene0674_01", "1mp3d_0048_region3", "8eabc430-5af7-2f32-84da-1b01b293f2d3", "scene0520_00", "095821fb-e2c2-2de1-94df-20f2cb423bcb", "1mp3d_0033_region13", "1mp3d_0038_region11", "c92fb586-f771-2064-8678-a16eca03ac79", "scene0213_00", "scene0600_01", "scene0519_00", "scene0493_01", "scene0610_00", "scene0031_02", "scene0080_00", "scene0338_02", "scene0086_00", "scene0203_01", "scene0321_00", "scene0460_00", "scene0662_00", "scene0182_01", "scene0485_00", "scene0554_00", "scene0434_00", "scene0476_01", "scene0598_01", "scene0630_02", "scene0500_00", "scene0389_00", "scene0574_00", "scene0662_02", "scene0629_00", "scene0258_00", "scene0479_00", "scene0539_00", "scene0390_00", "scene0469_01", "scene0307_00", "scene0045_00", "scene0449_01", "scene0031_00", "scene0301_00", "scene0223_01", "scene0459_00", "scene0691_01", "scene0702_00", "scene0318_00", "scene0572_02", "scene0294_01", "scene0168_00", "scene0261_02", "scene0642_00", "scene0241_02", "scene0001_01", "scene0089_01", "scene0235_00", "scene0587_01", "scene0331_01", "scene0610_02", "scene0440_00", "scene0702_02", "scene0113_01", "scene0171_00", "scene0636_00", "scene0278_00", "scene0157_00", "scene0073_03", "scene0698_00", "scene0597_02", "scene0096_01", "scene0028_00", "scene0277_01", "scene0008_00", "scene0241_00", "scene0529_01", "scene0380_01", "scene0192_00", "scene0704_00", "scene0552_00", "scene0168_02", "scene0616_00", "scene0466_00", "scene0424_01", "scene0337_01", "scene0516_01", "scene0506_00", "scene0194_00", "scene0434_02", "scene0642_02", "scene0038_01", "scene0105_00", "scene0446_00", "scene0630_00", "scene0248_01", "scene0697_03", "scene0177_02", "scene0327_00", "scene0103_00", "scene0687_00", "scene0353_00", "scene0572_00", "scene0620_01", "scene0261_00", "scene0307_02", "scene0626_01", "scene0073_01", "scene0404_01", "scene0672_01", "scene0654_01", "scene0470_01", "scene0365_01", "scene0123_00", "scene0355_00", "scene0412_00", "scene0629_02", "scene0292_01", "scene0158_01", "scene0373_00", "scene0509_01", "scene0673_05", "scene0536_01", "scene0609_00", "scene0456_01", "scene0177_00", "scene0414_00", "scene0148_00", "scene0588_00", "scene0063_00", "scene0396_00", "scene0396_02", "scene0115_01", "scene0151_00", "scene0141_01", "scene0375_02", "scene0123_02", "scene0588_02", "scene0664_02", "scene0055_01", "scene0345_01", "scene0591_00", "scene0432_00", "scene0080_02", "scene0017_00", "scene0268_01", "scene0267_00", "scene0353_02", "scene0215_00", "scene0011_00", "scene0065_00", "scene0282_02", "scene0105_02", "scene0510_01", "scene0027_01", "scene0282_00", "scene0043_00", "scene0539_02", "scene0451_05", "scene0591_02", "scene0581_01", "scene0574_02", "scene0271_01", "scene0681_00", "scene0284_00", "scene0099_00", "scene0440_02", "scene0664_00", "scene0697_01", "scene0161_01", "scene0205_01", "scene0037_00", "scene0597_00", "scene0233_00", "scene0609_02", "scene0338_00", "scene0301_02", "scene0526_00", "scene0017_02", "scene0086_02", "scene0479_02", "scene0247_00", "scene0644_00", "scene0399_01", "scene0065_02", "scene0606_01", "scene0192_02", "scene0147_01", "scene0317_01", "scene0125_00", "scene0483_00", "scene0375_00", "1mp3d_0036_region23", "1mp3d_0064_region9", 
"1mp3d_0052_region7", "1mp3d_0018_region5", "1mp3d_0033_region27", "1mp3d_0041_region36", "1mp3d_0061_region3", "1mp3d_0001_region0", "1mp3d_0003_region15", "1mp3d_0013_region21", "1mp3d_0013_region23", "1mp3d_0065_region27", "1mp3d_0048_region19", "1mp3d_0060_region18", "1mp3d_0043_region19", "1mp3d_0050_region11", "1mp3d_0001_region35", "1mp3d_0066_region20", "1mp3d_0002_region3", "1mp3d_0065_region13", "1mp3d_0066_region19", "1mp3d_0039_region2", "1mp3d_0039_region36", "1mp3d_0040_region25", "1mp3d_0010_region0", "1mp3d_0005_region20", "1mp3d_0013_region15", "1mp3d_0018_region15", "1mp3d_0008_region17", "1mp3d_0069_region6", "1mp3d_0035_region51", "1mp3d_0009_region8", "1mp3d_0062_region0", "1mp3d_0075_region9", "1mp3d_0033_region25", "1mp3d_0005_region22", "1mp3d_0025_region0", "1mp3d_0048_region1", "1mp3d_0006_region13", "1mp3d_0064_region6", "1mp3d_0016_region4", "1mp3d_0039_region0", "1mp3d_0043_region14", "1mp3d_0008_region18", "1mp3d_0073_region2", "1mp3d_0026_region15", "1mp3d_0004_region8", "1mp3d_0038_region27", "1mp3d_0033_region11", "1mp3d_0030_region14", "1mp3d_0038_region28", "1mp3d_0012_region30", "1mp3d_0040_region52", "1mp3d_0023_region4", "1mp3d_0035_region26", "1mp3d_0004_region7", "1mp3d_0082_region4", "1mp3d_0003_region18", "1mp3d_0044_region45", "1mp3d_0038_region25", "1mp3d_0075_region6", "1mp3d_0046_region10", "1mp3d_0069_region37", "1mp3d_0048_region14", "1mp3d_0004_region5", "1mp3d_0026_region3", "1mp3d_0018_region7", "1mp3d_0043_region16", "1mp3d_0035_region24", "1mp3d_0032_region6", "1mp3d_0013_region17", "1mp3d_0043_region5", "1mp3d_0018_region18", "1mp3d_0066_region16", "1mp3d_0069_region38", "1mp3d_0016_region9", "1mp3d_0033_region28", "1mp3d_0039_region78", "1mp3d_0010_region12", "1mp3d_0039_region77", "1mp3d_0057_region2", "1mp3d_0064_region4", "1mp3d_0069_region4", "1mp3d_0038_region13", "1mp3d_0010_region10", "1mp3d_0001_region2", "1mp3d_0016_region11", "1mp3d_0040_region64", "1mp3d_0039_region39", "1mp3d_0040_region28", "1mp3d_0076_region14", "1mp3d_0025_region2", "1mp3d_0025_region12", "1mp3d_0005_region19", "1mp3d_0040_region50", "1mp3d_0078_region12", "1mp3d_0018_region8", "1mp3d_0060_region15", "1mp3d_0087_region3", "1mp3d_0001_region42", "1mp3d_0078_region4", "1mp3d_0003_region21", "1mp3d_0078_region10", "1mp3d_0016_region6", "1mp3d_0046_region0", "1mp3d_0009_region5", "1mp3d_0073_region0", "1mp3d_0043_region20", "1mp3d_0066_region14", "1mp3d_0013_region18", "1mp3d_0076_region7", "1mp3d_0026_region21", "1mp3d_0041_region34", "1mp3d_0067_region5", "1mp3d_0040_region9", "1mp3d_0001_region37", "1mp3d_0057_region0", "1mp3d_0065_region28", "1mp3d_0059_region3", "1mp3d_0010_region2", "1mp3d_0060_region17", "1mp3d_0032_region4", "1mp3d_0018_region23", "1mp3d_0040_region13", "1mp3d_0035_region10", "1mp3d_0026_region1", "1mp3d_0000_region10", "1mp3d_0061_region1", "1mp3d_0036_region21", "1mp3d_0018_region21", "1mp3d_0026_region17", "1mp3d_0026_region18", "1mp3d_0076_region5", "1mp3d_0035_region29", "1mp3d_0039_region75", "1mp3d_0066_region22", "1mp3d_0065_region25", "1mp3d_0030_region16", "1mp3d_0013_region3", "1mp3d_0084_region2", "1mp3d_0067_region8", "1mp3d_0043_region8", "1mp3d_0065_region11", "1mp3d_0076_region8", "1mp3d_0045_region3", "1mp3d_0067_region7", "1mp3d_0050_region13", "1mp3d_0043_region7", "1mp3d_0040_region6", "1mp3d_0006_region11", "1mp3d_0005_region16", "1mp3d_0046_region2", "1mp3d_0062_region2", "1mp3d_0044_region32", "1mp3d_0032_region9", "1mp3d_0075_region4", "1mp3d_0023_region11", "1mp3d_0046_region12", "1mp3d_0035_region12", 
"1mp3d_0001_region38", "1mp3d_0078_region9", "1mp3d_0018_region17", "1mp3d_0040_region27", "1mp3d_0023_region9", "1mp3d_0009_region7", "1mp3d_0012_region32", "1mp3d_0003_region17", "1mp3d_0039_region41", "1mp3d_0017_region34", "1mp3d_0001_region40", "1mp3d_0029_region34", "1mp3d_0048_region16", "1mp3d_0005_region14", "1mp3d_0082_region6", "1mp3d_0078_region6", "1mp3d_0012_region45", "1mp3d_0036_region18", "1mp3d_0045_region1", "1mp3d_0039_region43", "1mp3d_0002_region1", "1mp3d_0087_region1", "1mp3d_0069_region42", "1mp3d_0017_region36", "1mp3d_0036_region17", "1mp3d_0084_region0", "1mp3d_0023_region13", "1mp3d_0070_region1", "1mp3d_0073_region10", "1mp3d_0023_region6", "1mp3d_0016_region13", "1mp3d_0069_region9", "1mp3d_0043_region22", "1mp3d_0000_region12", "1mp3d_0036_region15", "1mp3d_0040_region11", "1mp3d_0039_region34", "1mp3d_0025_region10", "1mp3d_0013_region1", "1mp3d_0026_region23", "1mp3d_0073_region12", "1mp3d_0052_region5", "1mp3d_0040_region4", "1mp3d_0069_region40", "1mp3d_0044_region30", "1mp3d_0059_region1", "1mp3d_0069_region35", "1mp3d_0082_region9", "d7d40d4c-7a5d-2b36-95c1-5f6c9147caf0", "a0905fdb-66f7-2272-9fc5-7c0008d5e87b", "fa79392f-7766-2d5c-869a-f5d6cfb62fc6", "283ccfef-107c-24d5-8aa6-c570e923c134", "e2847ff3-a506-2b60-876a-e16960aa5fb2", "ffa41874-6f78-2040-85a8-8056ac60c764", "2451c03f-fae8-24f6-90ac-6ba38cab8c92", "ab835fa1-54c6-29a1-9982-116396773f00", "f4f31600-8408-2255-971c-b8c20605563a", "0ad2d395-79e2-2212-9b89-83581fad7390", "bcb0fe13-4f39-2c70-9fdd-eff98a9fbf7e", "77941464-cfdf-29cb-87f4-0465d3b9ab00", "6a36053b-fa53-2915-9716-6b5361c7791a", "baf67395-8e94-22f8-8100-9c9d7ce1c2b7", "ba6fdab2-a4c1-2dca-8159-a5da9cccb8ab", "0958222d-e2c2-2de1-9732-e2fb990692ef", "283ccff5-107c-24d5-886c-1d3a1319186a", "13124cc2-cec3-27a6-86ad-946fe3a73493", "1dd7209f-2ba0-22d9-8b9e-b5e270b2580f", "95be45db-a558-22da-9eac-5cea5debfcd8", "5341b79d-8a66-2cdd-85e0-e95b96563cd3", "10b17965-3938-2467-8997-5dabffaa41a9", "1d2f851a-d757-207c-8faa-3625b6dda1e5", "ab835fae-54c6-29a1-995e-b06cfc555786", "5630cfcd-12bf-2860-87f0-65937859709c", "e61b0e02-bada-2f31-82d0-80fc5c70bd6f", "6bde608d-9162-246f-8dde-3f158d134d50", "1d234010-e280-2b1a-8da8-205855a16b6b", "6bde6061-9162-246f-8d02-8c10ae9f10aa", "ba6fdaaa-a4c1-2dca-8163-a52b18bf6b64", "283ccff1-107c-24d5-8b6f-9bd3a42ca380", "c92fb5a9-f771-2064-86fc-ae25bdd558c4", "6bde60cd-9162-246f-8fad-fca80b4d6ad8", "0cac768a-8d6f-2d13-8dd3-3cbb7d916641", "dbeb4d0b-faf9-2324-99bf-259c104b313b", "c92fb592-f771-2064-8446-c2605c0202e9", "a0905fee-66f7-2272-9d07-032a9a4e7cc9", "c7895f76-339c-2d13-8291-06be5ae81d2e", "87e6cf6d-9d1a-289f-879a-543d3fa7ba74", "bf9a3de1-45a5-2e80-8325-6cf2b519d40c", "43b8caeb-6678-2e38-9933-b351784e8750", "531cff06-0021-28f6-8ce2-e13a4801e5a8", "0cac7654-8d6f-2d13-8e68-ddd1767454ad", "9766cbf5-6321-2e2f-8131-78c4e204635d", "0cac759b-8d6f-2d13-8e3b-2e3bc1ee1158", "1d2f8510-d757-207c-8c48-3684433860e1", "68bae766-3567-2f7c-825a-f0522da62564", "10b1794e-3938-2467-89a7-ebc89e84cf88", "7747a504-9431-24e8-8587-7e4443481e6c", "77361fca-d054-2a22-8974-547ca1fbb90f", "1d233ffe-e280-2b1a-8f1e-7ddb66c98d36", "742e8f17-be0a-294e-9dd3-52492d308e2b", "6bde608f-9162-246f-8e28-7c487fbbdfa1", "5341b7db-8a66-2cdd-85c5-66dbe881bd5f", "2ea047cd-aeca-2021-8b73-675adae64f19", "f2c76fe7-2239-29d0-84f5-144c30fd7451", "0cac7602-8d6f-2d13-8ca7-c26866c2e42f", "6993478e-1286-2e5b-82d0-eb36d75214de", "ab835faa-54c6-29a1-9b55-1a5217fcba19", "b8837e18-57ec-29c6-8919-d767d4811461", "754e884c-ea24-2175-8b34-cead19d4198d", 
"6a360523-fa53-2915-9506-4b95fa02cc56", "0cac7582-8d6f-2d13-8d4b-e4041cb166c4", "ebc4204c-82a4-2113-85cb-529a884b1629", "c2d99347-1947-2fbf-834b-f95790c125dd", "754e884e-ea24-2175-88bf-5fd89d996e36", "73315a27-185c-2c8a-848a-46d989c1112a", "20c993ad-698f-29c5-8580-879f93258149", "a0906008-66f7-2272-9e70-748b317145dd", "569d8f15-72aa-2f24-8b0a-2a9bbe4c9a3d", "569d8f11-72aa-2f24-88ea-bd6aa4f1d84c", "0ad2d3a7-79e2-2212-9a1b-8737842a24e2", "c7895f1b-339c-2d13-8348-d178c1c4b687", "73315a33-185c-2c8a-87a3-7915ecadfa45", "1d2f8518-d757-207c-8d4a-b2f43254c68f", "210cdbc5-9e8d-2832-853e-137542cf1a9b", "d9725be1-7513-2d91-8723-770169ff5b75", "42384908-60a7-271e-9c46-01e562c8974c", "422885ab-192d-25fc-8448-e7f34c7b5eea", "10b17944-3938-2467-8bac-5552375e4467", "b8837e34-57ec-29c6-8b50-775098de406a", "6bde6077-9162-246f-8c20-c85ea2964342", "d7d40d46-7a5d-2b36-9734-659bccb1c202", "75c25975-9ca2-2844-9769-84677f46d4cf", "baf673aa-8e94-22f8-815f-0a3f3d250071", "c92fb5b7-f771-2064-87a9-31c819832405", "7272e161-a01b-20f6-8b5a-0b97efeb6545", "4238491e-60a7-271e-9fe8-eb04b4209883", "0cac7621-8d6f-2d13-8cb1-ac43b90f4141", "fcf66da4-622d-291c-8642-c11ea83a329c", "e48c48f4-8b2f-2858-88c2-fd5e4c33f5b8", "a0905ffc-66f7-2272-9f05-966bab3ce8ad", "751a5588-fe61-2c3b-8f6d-1e454269dd55", "4d3d82b0-8cf4-2e04-80a8-c955ea964c2f", "d9725be5-7513-2d91-8518-d324d081f19c", "20c993cd-698f-29c5-8575-6a18e0e920cc", "ebc4204e-82a4-2113-8726-a494a47ce349", "8e0f1c39-9e28-2339-8432-ca5ca8653c58", "4731977a-f9f7-2a1a-97ae-97ad06041da5", "634d11cb-6833-255d-8ff4-b1812575e7dd", "bf9a3de9-45a5-2e80-8022-277108d67404", "6bde60da-9162-246f-8ec0-a5f4760ef7b8", "0cac760f-8d6f-2d13-8d9d-2d8df8f8cb6e", "e2847ff7-a506-2b60-869c-2780e8694ae0", "c7895f7a-339c-2d13-82ac-09ef1c9001ba", "7ab2a9c3-ebc6-2056-8965-ecbce808b905", "569d8f0d-72aa-2f24-8ac6-c6ee8d927c4b", "18d4d924-7eb5-280e-8764-f9654f215144", "6e67e550-1209-2cd0-8294-7cc2564cf82c", "10b1794c-3938-2467-8b2b-4e5c91cc5cc3", "c92fb58a-f771-2064-8432-02b46f2b3e49", "c670793d-2ecb-2de2-81fe-76776e342133", "fcf66dba-622d-291c-8537-1ab5313bc52a", "fcf66db2-622d-291c-8493-4f2517282f3f", "c7895f0b-339c-2d13-80e2-1d2aa04aa528", "b8837e26-57ec-29c6-8912-0cf70aa80f98", "13124cb2-cec3-27a6-8583-cc0a9d6d1a9a", "c9fb7aa5-2a5b-2cf7-902e-42a0339c7242", "1d2f8514-d757-207c-8e40-377957df6f67", "b1cf996a-9fdd-2189-95b2-2a97331daf35", "ad408ca3-84db-2095-89cf-05d249a54412", "75c259b9-9ca2-2844-9473-43d990560f90", "dbeb4d06-faf9-2324-9bca-4df3f194818e", "def7fbc1-48c2-2895-91d9-cb5e6f3e3589", "baf67389-8e94-22f8-8055-1d869ecfede9", "0cac75ab-8d6f-2d13-8fea-b1eb7e9bf6e7", "c7895f25-339c-2d13-820a-5cfc7d4d3359", "77361fbe-d054-2a22-885b-a5437892d854", "0cac75c8-8d6f-2d13-8c08-b3c40c58e0f7", "c7895f78-339c-2d13-82bb-cc990cbbc90f", "bf9a3df1-45a5-2e80-8198-0652e415e289", "4e858ca1-fd93-2cb4-84b6-490997979830", "4e858c8f-fd93-2cb4-8676-36cea7ef23cc", "c92fb5b3-f771-2064-86f2-f14da264bfcf", "c92fb58c-f771-2064-8496-688a5baaf5c6", "6bde605f-9162-246f-8d03-644a980c1cbb", "8eabc43e-5af7-2f32-84f8-4bb45c0d6460", "20c993bd-698f-29c5-8494-5556ba7d3fe9", "8e0f1c26-9e28-2339-8799-53c13e81d9ff", "634d11d9-6833-255d-8fa2-ce325873192d", "b1d87fb2-e72e-2c8c-9f82-ce7e4b3808e2", "54b263a3-0199-2df4-87db-40539528902d", "7ab2a9cf-ebc6-2056-88d4-865e66cc1940", "4d3d82a8-8cf4-2e04-8025-48aca980e0be", "0cac7676-8d6f-2d13-8f3a-d7bf7f03e721", "d63767c3-3205-226c-98c2-fdccc047d36e", "7ab2a9d3-ebc6-2056-8afd-e80dc7e1558f", "5341b7d9-8a66-2cdd-844b-cfdadb69ed63", "1d2f850e-d757-207c-8eea-5d41656673f4", 
"8eabc441-5af7-2f32-85e5-f1bd241503de", "569d8f09-72aa-2f24-887c-26497a35f26a", "a38a57e0-8a91-2e91-9349-9a2e62dbda62", "422885b7-192d-25fc-86da-e1beaae7c8ba", "198aaa74-0ba3-26f6-8792-66b6388b17ca", "6bde6074-9162-246f-8d34-5d8a64c07009", "4e858c85-fd93-2cb4-87e4-8d8ff1f31652", "10b17973-3938-2467-8906-5ba39b48458d", "fcf66d82-622d-291c-87be-78d421381146", "baf673ac-8e94-22f8-83d4-31a21cc201af", "6bde60cf-9162-246f-8f98-6355d75494c2", "0988ea72-eb32-2e61-8344-99e2283c2728", "2e36953b-e133-204c-931b-a2cf0f93fed6", "75c25999-9ca2-2844-9761-7642c9829210", "18d4d926-7eb5-280e-87df-199e66f2babd", "scene0463_01", "1mp3d_0044_region16", "2a7f9476-080c-26f9-86e9-c7ce1c76fc07", "scene0162_00", "1mp3d_0069_region28", "1mp3d_0057_region13", "5341b79f-8a66-2cdd-84d5-6dba6525dd75", "1mp3d_0000_region1", "c92fb5a2-f771-2064-8557-1dcf9c0e31a8", "1mp3d_0014_region17", "1d234012-e280-2b1a-8dea-0d1d53e0572d", "1mp3d_0042_region6", "scene0418_02", "1mp3d_0057_region27", "scene0220_02", "scene0129_00", "1mp3d_0002_region16", "0cac75f2-8d6f-2d13-8f0f-f00e880b4e7b", "ab835fa7-54c6-29a1-997b-3804159c15ea", "1mp3d_0035_region36", "scene0523_01", "scene0547_00", "scene0101_05", "scene0244_01", "scene0421_02", "scene0455_00", "scene0393_01", "scene0236_01", "scene0578_00", "scene0369_01", "scene0050_00", "scene0297_02", "scene0095_00", "scene0181_02", "scene0219_00", "scene0210_01", "scene0582_02", "scene0152_01", "scene0332_00", "scene0083_01", "scene0437_01", "scene0505_01", "scene0421_00", "scene0438_00", "scene0568_01", "scene0076_00", "scene0613_01", "scene0272_00", "scene0558_00", "scene0349_01", "scene0548_01", "scene0312_00", "scene0453_00", "scene0264_01", "scene0346_00", "scene0558_02", "scene0603_00", "scene0116_00", "scene0040_01", "scene0411_01", "scene0677_00", "scene0445_01", "scene0535_00", "scene0032_01", "scene0288_02", "scene0427_00", "scene0050_02", "scene0142_00", "scene0668_00", "scene0004_00", "scene0198_00", "scene0692_00", "scene0200_00", "scene0547_02", "scene0206_00", "scene0069_00", "scene0366_00", "scene0692_02", "scene0513_00", "scene0172_01", "scene0671_00", "scene0651_02", "scene0633_01", "scene0383_02", "scene0571_01", "scene0651_00", "scene0376_01", "scene0584_02", "scene0220_00", "scene0605_00", "scene0187_00", "scene0046_01", "scene0541_02", "scene0474_04", "scene0385_00", "scene0085_01", "scene0116_02", "scene0002_00", "scene0291_02", "scene0079_01", "scene0533_00", "scene0379_00", "scene0557_01", "scene0401_00", "scene0164_02", "scene0408_01", "scene0110_00", "scene0694_00", "scene0473_00", "scene0297_00", "scene0489_02", "scene0490_00", "scene0110_02", "scene0359_00", "scene0012_01", "scene0480_01", "scene0418_00", "scene0302_01", "scene0465_01", "scene0109_00", "scene0592_01", "scene0489_00", "scene0022_00", "scene0356_01", "scene0274_00", "scene0667_01", "scene0312_02", "scene0505_03", "scene0647_01", "scene0209_01", "scene0407_00", "scene0615_01", "scene0136_00", "scene0350_01", "scene0541_00", "scene0197_01", "scene0229_01", "scene0340_02", "scene0093_00", "scene0252_00", "scene0582_00", "scene0100_01", "scene0239_02", "scene0340_00", "scene0625_00", "scene0226_00", "scene0024_00", "scene0126_01", "scene0174_01", "scene0314_00", "scene0254_00", "scene0049_00", "scene0092_04", "scene0239_00", "scene0070_00", "scene0274_02", "scene0578_02", "scene0395_01", "scene0657_00", "scene0059_01", "scene0648_00", "scene0584_00", "scene0515_00", "scene0120_01", "scene0144_00", "scene0475_02", "scene0678_01", "scene0635_01", "scene0334_02", "scene0034_01", "scene0191_01", 
"scene0181_00", "scene0164_00", "scene0242_01", "scene0525_01", "scene0385_02", "scene0130_00", "scene0093_02", "scene0567_00", "scene0332_02", "scene0515_02", "scene0496_00", "scene0383_00", "scene0334_00", "scene0056_00", "scene0360_00", "scene0324_01", "scene0106_01", "scene0623_00", "scene0370_01", "scene0200_02", "scene0701_01", "scene0136_02", "scene0684_01", "scene0677_02", "scene0291_00", "scene0561_00", "scene0206_02", "scene0288_00", "scene0262_01", "scene0475_00", "scene0024_02", "scene0428_01", "scene0060_01", "1mp3d_0072_region1", "1mp3d_0047_region3", "1mp3d_0038_region3", "1mp3d_0041_region12", "1mp3d_0059_region18", "1mp3d_0035_region1", "1mp3d_0047_region27", "1mp3d_0074_region7", "1mp3d_0029_region24", "1mp3d_0022_region10", "1mp3d_0017_region5", "1mp3d_0040_region42", "1mp3d_0047_region28", "1mp3d_0019_region14", "1mp3d_0041_region29", "1mp3d_0074_region8", "1mp3d_0014_region23", "1mp3d_0012_region2", "1mp3d_0062_region11", "1mp3d_0047_region1", "1mp3d_0071_region16", "1mp3d_0040_region38", "1mp3d_0061_region14", "1mp3d_0079_region8", "1mp3d_0079_region11", "1mp3d_0060_region0", "1mp3d_0041_region24", "1mp3d_0069_region25", "1mp3d_0035_region39", "1mp3d_0077_region9", "1mp3d_0019_region19", "1mp3d_0072_region11", "1mp3d_0006_region8", "1mp3d_0040_region35", "1mp3d_0047_region25", "1mp3d_0017_region12", "1mp3d_0066_region9", "1mp3d_0003_region2", "1mp3d_0001_region28", "1mp3d_0050_region7", "1mp3d_0027_region0", "1mp3d_0027_region16", "1mp3d_0005_region4", "1mp3d_0079_region7", "1mp3d_0047_region13", "1mp3d_0022_region29", "1mp3d_0041_region26", "1mp3d_0061_region19", "1mp3d_0014_region15", "1mp3d_0071_region0", "1mp3d_0058_region0", "1mp3d_0057_region25", "1mp3d_0039_region12", "1mp3d_0019_region16", "1mp3d_0017_region29", "1mp3d_0082_region12", "1mp3d_0069_region11", "1mp3d_0014_region4", "1mp3d_0040_region40", "1mp3d_0067_region17", "1mp3d_0085_region1", "1mp3d_0036_region2", "1mp3d_0042_region4", "1mp3d_0067_region18", "1mp3d_0061_region20", "1mp3d_0006_region7", "1mp3d_0036_region0", "1mp3d_0033_region8", "1mp3d_0004_region15", "1mp3d_0005_region30", "1mp3d_0001_region27", "1mp3d_0042_region18", "1mp3d_0057_region11", "1mp3d_0029_region26", "1mp3d_0019_region4", "1mp3d_0029_region29", "1mp3d_0029_region12", "1mp3d_0077_region4", "1mp3d_0039_region53", "1mp3d_0058_region2", "1mp3d_0062_region25", "1mp3d_0005_region6", "1mp3d_0084_region11", "1mp3d_0044_region22", "1mp3d_0002_region20", "1mp3d_0001_region52", "1mp3d_0069_region13", "1mp3d_0079_region13", "1mp3d_0039_region65", "1mp3d_0062_region13", "1mp3d_0014_region9", "1mp3d_0039_region29", "1mp3d_0041_region10", "1mp3d_0005_region9", "1mp3d_0042_region21", "1mp3d_0019_region22", "1mp3d_0024_region3", "1mp3d_0022_region24", "1mp3d_0027_region14", "1mp3d_0044_region2", "1mp3d_0017_region10", "1mp3d_0011_region3", "1mp3d_0061_region16", "1mp3d_0033_region7", "1mp3d_0066_region6", "1mp3d_0013_region44", "1mp3d_0019_region6", "1mp3d_0024_region1", "1mp3d_0047_region11", "1mp3d_0064_region10", "1mp3d_0032_region26", "1mp3d_0008_region6", "1mp3d_0067_region23", "1mp3d_0019_region9", "1mp3d_0082_region10", "1mp3d_0014_region6", "1mp3d_0044_region14", "1mp3d_0069_region27", "1mp3d_0042_region17", "1mp3d_0041_region7", "1mp3d_0011_region1", "1mp3d_0022_region8", "1mp3d_0014_region21", "1mp3d_0032_region10", "1mp3d_0032_region24", "1mp3d_0017_region7", "1mp3d_0002_region22", "1mp3d_0002_region14", "1mp3d_0064_region12", "1mp3d_0035_region43", "1mp3d_0040_region37", "1mp3d_0027_region20", "1mp3d_0057_region28", 
"1mp3d_0022_region12", "1mp3d_0039_region67", "1mp3d_0004_region18", "1mp3d_0065_region5", "1mp3d_0039_region24", "1mp3d_0041_region5", "1mp3d_0067_region15", "1mp3d_0077_region18", "1mp3d_0042_region23", "1mp3d_0012_region19", "1mp3d_0071_region14", "1mp3d_0044_region20", "1mp3d_0000_region3", "1mp3d_0039_region26", "1mp3d_0077_region6", "1mp3d_0033_region5", "1mp3d_0083_region5", "1mp3d_0072_region3", "1mp3d_0003_region0", "1mp3d_0035_region41", "1mp3d_0062_region28", "1mp3d_0074_region5", "1mp3d_0084_region13", "1mp3d_0022_region7", "1mp3d_0038_region1", "1mp3d_0042_region15", "1mp3d_0039_region80", "1mp3d_0027_region22", "1mp3d_0004_region21", "1mp3d_0013_region33", "1mp3d_0030_region6", "1mp3d_0012_region0", "1mp3d_0038_region35", "1mp3d_0044_region0", "1mp3d_0059_region17", "1mp3d_0011_region11", "1mp3d_0085_region3", "1mp3d_0039_region10", "1mp3d_0077_region15", "1mp3d_0008_region4", "1mp3d_0004_region17", "1mp3d_0050_region8", "1mp3d_0062_region27", "1mp3d_0066_region30", "1mp3d_0017_region8", "1mp3d_0001_region11", "1mp3d_0039_region51", "1mp3d_0065_region7", "1mp3d_0050_region5", "1mp3d_0012_region16", "1mp3d_0004_region23", "1mp3d_0014_region18", "1mp3d_0011_region13", "1mp3d_0066_region4", "1mp3d_0001_region25", "1mp3d_0012_region20", "1mp3d_0027_region19", "1mp3d_0022_region26", "1mp3d_0044_region19", "1mp3d_0059_region15", "1mp3d_0042_region9", "1mp3d_0012_region22", "1mp3d_0012_region14", "1mp3d_0029_region10", "1mp3d_0077_region17", "1mp3d_0074_region12", "1mp3d_0065_region8", "1mp3d_0039_region68", "1mp3d_0001_region50", "1mp3d_0061_region22", "1mp3d_0001_region13", "1mp3d_0008_region9", "1mp3d_0035_region3", "1mp3d_0071_region2", "1mp3d_0029_region3", "1mp3d_0060_region2", "1mp3d_0002_region19", "1mp3d_0017_region24", "1mp3d_0022_region5", "1mp3d_0013_region46", "1mp3d_0035_region34", "1mp3d_0006_region5", "1mp3d_0041_region8", "1mp3d_0013_region31", "1mp3d_0079_region5", "1mp3d_0030_region9", "1mp3d_0067_region21", "1mp3d_0032_region12", "1mp3d_0005_region32", "1mp3d_0019_region20", "1mp3d_0017_region26", "1mp3d_0029_region1", "1mp3d_0030_region4", "1mp3d_0027_region2", "1mp3d_0074_region10", "1mp3d_0009_region14", "1mp3d_0071_region19", "19f1a897-a988-2bf8-8f8d-f3b64da81c2a", "5630cfc9-12bf-2860-84ed-5bb189f0e94e", "4acaebba-6c10-2a2a-8650-34c2f160db99", "0cac7625-8d6f-2d13-8f72-9672f2345ef9", "4d3d82ba-8cf4-2e04-8218-432a802344be", "fcf66daa-622d-291c-8548-a1163ee299b4", "c92fb590-f771-2064-8665-19d71d08c0ef", "2e4a3964-d452-21a0-9de5-4f7895499143", "0cac7627-8d6f-2d13-8eac-41764b5f2c5b", "7747a500-9431-24e8-8705-84f904e3bc57", "752cc57a-920c-26f5-8ce3-828413bc3cd4", "20c99397-698f-29c5-8534-5304111c28af", "a0905fd9-66f7-2272-9dfb-0483fdcc54c7", "0f2f2717-b736-2f71-8d6a-28817c01a17f", "55551091-36f1-29c0-8bae-687acbf664a0", "bf9a3dba-45a5-2e80-8282-0ee19d0447c7", "5630cfd3-12bf-2860-8749-9dacb499fb14", "c7895f86-339c-2d13-82c8-3976988cd327", "ba6fdaa2-a4c1-2dca-81a8-2aacd785edd7", "bf9a3de5-45a5-2e80-80ff-056b013f1064", "ad408ca1-84db-2095-8970-40e09dac6ed0", "ddc73795-765b-241a-9c5d-b97744afe077", "634d11c7-6833-255d-8d76-86fe90beafdc", "55551068-36f1-29c0-8896-0fa81b7a1fe7", "1776ad7a-4db7-2333-8afa-5fb5440b2edc", "b901681b-e754-293c-8c97-c988f204f2b6", "6bde60de-9162-246f-8cb4-87b39c9ac5e7", "6bde60a3-9162-246f-8ca3-48f7e86e95b8", "0cf75f4b-564d-23f5-899f-ff97701d3f3b", "198aaa76-0ba3-26f6-84de-6c13263a60bc", "a8952595-9035-254b-8c74-9c22ced19216", "0cac7611-8d6f-2d13-8f47-8ec4a6375ac3", "c12890e0-d3df-2d0d-87f7-9b8b04f663a5", "bf9a3dcb-45a5-2e80-83df-181fa8da160f", 
"ca653994-dbf9-207c-91a3-9085f4f169c8", "c7895f35-339c-2d13-805c-47570e126422", "bcb0fe1d-4f39-2c70-9e89-5c098ed27d6d", "8f0f144d-55de-28ce-8075-69a0a3b631b5", "10b1795f-3938-2467-8aa6-c985377d32fb", "ba6fda9a-a4c1-2dca-8264-792c15aa9ef9", "02b33dfd-be2b-2d54-91d2-55454852009e", "75c259a5-9ca2-2844-9441-d72912c1e696", "43b8cae3-6678-2e38-9b67-5905de29f6d7", "0cac7660-8d6f-2d13-8ecb-3e29d78a7718", "4e858c99-fd93-2cb4-8781-d0e7fc86003a", "b1f23305-d255-2761-95c6-9d89279ef29e", "3b7b33a9-1b11-283e-9b02-e8f35f6ba24c", "8f0f145e-55de-28ce-8203-45b4e2bb36d9", "41385867-a238-2435-8152-dc84ef14eae1", "ae73fa17-5a60-2398-86e8-aa1145c0c9b7", "42384914-60a7-271e-9c5f-f0e1eee114ae", "752cc595-920c-26f5-8d66-d08f8f111c92", "28298cd1-15b5-24be-8a19-49c0bf7c55b3", "09582209-e2c2-2de1-9610-08baed932919", "a895258d-9035-254b-8e61-2307a9926e62", "1713f859-5300-2104-841c-c60e88ded469", "77361fc8-d054-2a22-8868-c3de4283fff7", "4acaebbe-6c10-2a2a-866a-f0e58185caf7", "20c993b1-698f-29c5-865d-3a86c19acb3b", "c7895f39-339c-2d13-81c3-261003fc3b01", "a38a57de-8a91-2e91-90b2-910244171e92", "422885e5-192d-25fc-871f-83fa4d7af432", "0cac7580-8d6f-2d13-8c9d-d45247b5244b", "7747a4ee-9431-24e8-87be-433088b665c2", "5630cfe9-12bf-2860-840b-7363340dd0c4", "a0905fe8-66f7-2272-9e46-073291573af6", "ad408c99-84db-2095-8b23-0a011526b47b", "55551079-36f1-29c0-8bcf-aa3b2450b9c5", "c7895f61-339c-2d13-8004-3cbebf317f21", "2e4a3966-d452-21a0-9c4b-c021c4b66ce3", "bcb0fe06-4f39-2c70-9c24-a8dd7496c2f8", "0cac764a-8d6f-2d13-8fbd-2966f09c65d0", "c2d99343-1947-2fbf-808f-92dbb7d47aa5", "e48c48f8-8b2f-2858-89d1-36e9b49a06b0", "1d234006-e280-2b1a-8e34-b2f670259e8d", "fcf66dbc-622d-291c-8481-6e8761c93e21", "20c993a5-698f-29c5-8565-40e064af0fc4", "0cac762f-8d6f-2d13-8e48-ed83163799fb", "bcb0fe17-4f39-2c70-9d19-dafd03e967d8", "2e36954b-e133-204c-92ad-1a66c6f63e1a", "09582205-e2c2-2de1-9475-1cdac7639e60", "a0905fea-66f7-2272-9d2c-15612116fe96", "38770c95-86d7-27b8-8717-3485b411ddc7", "41385849-a238-2435-81d0-ceb0eba4541a", "75c25977-9ca2-2844-97ba-92479480fc00", "c6707938-2ecb-2de2-811c-69f6f7a0e638", "c6707946-2ecb-2de2-8381-d5eae12243ee", "fcf66db4-622d-291c-8720-d0a6bbd17846", "d7d40d68-7a5d-2b36-9671-6d4fcab1912a", "1776ad84-4db7-2333-8aa7-2cc9126d5f71", "20c993a1-698f-29c5-8716-5a937fdd879a", "ab835fa3-54c6-29a1-9a47-92dbaf9eb4b9", "63b87cef-ef3f-28f2-87a7-81c3fffbfa2d", "dbeb4d09-faf9-2324-9b85-dabd70dba4d0", "b8837e1f-57ec-29c6-88ad-4c19a5afbf36", "74ef846e-9dce-2d66-83d5-294aac7b1b0f", "19f1a892-a988-2bf8-8c91-0705cf396888", "0cac7637-8d6f-2d13-8e6f-f79dce250314", "5ed77dd4-c4f5-27a0-8476-f3048ea53ef5", "55551073-36f1-29c0-8a7d-91902bdddb1f", "56d957ed-0184-2301-8f4f-616c3b537e45", "6a36053f-fa53-2915-9579-3938283bc154", "1e0ccbe6-9783-2bd7-8d51-ca9e9226bf35", "c2d99345-1947-2fbf-818d-90ea82acef29", "d7d40d4e-7a5d-2b36-97e7-34324c52ac42", "ad408c8f-84db-2095-8a45-03100fbc4f86", "7272e17c-a01b-20f6-8b2f-e659331ae41a", "bf9a3dc3-45a5-2e80-832d-842aa34cc859", "ad408c9d-84db-2095-89c7-8a54f6260252", "0ad2d39b-79e2-2212-99ae-830c292cd079", "1776ad8a-4db7-2333-8909-97b950726515", "0ad2d382-79e2-2212-98b3-641bf9d552c1", "0cac7549-8d6f-2d13-8d56-b895956f571a", "0cac75f4-8d6f-2d13-8c10-ce688f7ac0af", "ddc737a7-765b-241a-9f27-0958b929deb5", "d63767bc-3205-226c-9b7c-72e4a9c0a79f", "ddc737a5-765b-241a-9cd2-c6e7a89aa43d", "0cac75af-8d6f-2d13-8f9e-ed3f62665aed", "5555107d-36f1-29c0-896a-5c8ccfb81965", "09582216-e2c2-2de1-97de-efcab1ef9c43", "5341b7c3-8a66-2cdd-8651-225a7489523a", "0f2f271f-b736-2f71-8f74-15fd679c80d5", 
"bcb0fe1f-4f39-2c70-9e7f-a9d783c159fc", "47319770-f9f7-2a1a-9583-5aa0374a4d35", "352e9c36-69fb-27a7-889f-69b450a22b74", "c92fb5a7-f771-2064-8766-1200dca296ac", "dbeb4d67-faf9-2324-9960-d9db614caeff", "6a36055f-fa53-2915-9432-786820630ea7", "4acaebcc-6c10-2a2a-858b-29c7e4fb410d", "352e9c46-69fb-27a7-8b1d-bc83c253c676", "09582244-e2c2-2de1-956c-357092d949d1", "7f30f368-42f9-27ed-852b-e6cfc067acea", "73315a29-185c-2c8a-860a-e0303d3dd492", "fcf66d96-622d-291c-842c-836d61a13779", "8eabc461-5af7-2f32-8663-ce5a10fd97b3", "c7895f29-339c-2d13-83e9-90dbe61fa8be", "5341b7dd-8a66-2cdd-8789-886081e65f53", "fcf66d79-622d-291c-84dd-a88fa9c4e664", "c92fb5b1-f771-2064-8492-1233552bf94d", "4d3d82a2-8cf4-2e04-810b-7634c83eed98", "9766cbf7-6321-2e2f-81e1-2b46533c64dd", "68bae76a-3567-2f7c-8143-f70365deb0f7", "38770ca3-86d7-27b8-85a7-7d840ffdec6a", "8f0f1455-55de-28ce-832d-d58f1c6c398d", "baf0a8fb-26d4-2033-8a28-2001356bbb9a", "fcf66dac-622d-291c-8542-d108fb4a91f5", "dbeb4d1b-faf9-2324-9ac8-cbad7aa51d12", "6bde6055-9162-246f-8f44-e3efc21c9e14", "2e369567-e133-204c-909a-c5da44bb58df", "d7d40d66-7a5d-2b36-941c-8c8d61e4e0ab", "a644cb91-0ee5-2f66-9da1-5edabca2f13d", "d7d40d70-7a5d-2b36-960f-ddf3159cb360", "d04eb40f-1d53-27ea-8a41-47892bde7017", "19eda6f4-55aa-29a0-8893-8eac3a4d8193", "6a360561-fa53-2915-94d5-2b7d2ce9b169", "569d8f03-72aa-2f24-882f-1ce08cc072e6", "c92fb596-f771-2064-86ba-d429f869e52f", "80b8588d-4a8d-222f-84c5-8c51b9af1c2f", "7272e16c-a01b-20f6-8961-a0927b4a7629", "e3004a81-9f2a-2778-874e-fa76b0e67096", "1c21154c-f201-2d25-875c-01ab18e03231", "8eabc469-5af7-2f32-840f-c1be88e46c62", "0cac75f6-8d6f-2d13-8d7b-dc01a394149c", "ab835f92-54c6-29a1-99eb-63169a21d553", "7ab2a9d1-ebc6-2056-8880-07b5c7404d58", "ad408c8d-84db-2095-8963-4570e5546cbd", "10b1795b-3938-2467-88fe-b10cad8913f8", "c92fb5ab-f771-2064-842c-c342564aabcc", "bcb0fe15-4f39-2c70-9f48-a26b76dfe042", "09582248-e2c2-2de1-94ff-edbe78c9c0b4", "5341b7bf-8a66-2cdd-8794-026113b7c312", "77361fcc-d054-2a22-89ce-6bc2aa429596", "0f2f2721-b736-2f71-8cd7-8f86d4bd0437", "0ad2d3a1-79e2-2212-9b99-a96495d9f7fe"] diff --git a/data_preparation/meta_data/mp3d_mapping.json b/data_preparation/meta_data/mp3d_mapping.json new file mode 100644 index 0000000..c3cba7e --- /dev/null +++ b/data_preparation/meta_data/mp3d_mapping.json @@ -0,0 +1 @@ +{"ZMojNkEp431": "1mp3d_0000", "uNb9QFRL6hY": "1mp3d_0001", "pRbA3pwrgk9": "1mp3d_0002", "1pXnuDYAj8r": "1mp3d_0003", "ULsKaCPVFJR": "1mp3d_0004", "D7N2EKCX4Sj": "1mp3d_0005", "sT4fr6TAbpF": "1mp3d_0006", "YmJkqBEsHnH": "1mp3d_0007", "VLzqgDo317F": "1mp3d_0008", "EU6Fwq7SyZv": "1mp3d_0009", "s8pcmisQ38h": "1mp3d_0010", "JeFG25nYj2p": "1mp3d_0011", "E9uDoFAP3SH": "1mp3d_0012", "PX4nDJXEHrG": "1mp3d_0013", "Vvot9Ly1tCj": "1mp3d_0014", "5ZKStnWn8Zo": "1mp3d_0015", "S9hNv5qa7GM": "1mp3d_0016", "p5wJjkQkbXX": "1mp3d_0017", "b8cTxDM8gDG": "1mp3d_0018", "mJXqzFtmKg4": "1mp3d_0019", "UwV83HsGsw3": "1mp3d_0020", "wc2JMjhGNzB": "1mp3d_0021", "XcA2TqTSSAj": "1mp3d_0022", "8WUmhLawc2A": "1mp3d_0023", "PuKPg4mmafe": "1mp3d_0024", "jh4fc5c5qoQ": "1mp3d_0025", "oLBMNvg9in8": "1mp3d_0026", "5LpN3gDmAk7": "1mp3d_0027", "WYY7iVyf5p8": "1mp3d_0028", "7y3sRwLe3Va": "1mp3d_0029", "JmbYfDe2QKZ": "1mp3d_0030", "jtcxE69GiFV": "1mp3d_0031", "TbHJrupSAjP": "1mp3d_0032", "kEZ7cmS4wCh": "1mp3d_0033", "q9vSo1VnCiC": "1mp3d_0034", "SN83YJsR3w2": "1mp3d_0035", "sKLMLpTHeUy": "1mp3d_0036", "pa4otMbVnkk": "1mp3d_0037", "QUCTc6BB5sX": "1mp3d_0038", "B6ByNegPMKs": "1mp3d_0039", "vyrNrziPKCB": "1mp3d_0040", "VFuaQ6m2Qom": "1mp3d_0041", "X7HyMhZNoso": 
"1mp3d_0042", "2azQ1b91cZZ": "1mp3d_0043", "VzqfbhrpDEA": "1mp3d_0044", "i5noydFURQK": "1mp3d_0045", "r1Q1Z4BcV1o": "1mp3d_0046", "Uxmj2M2itWa": "1mp3d_0047", "759xd9YjKW5": "1mp3d_0048", "yqstnuAEVhm": "1mp3d_0049", "r47D5H71a5s": "1mp3d_0050", "YFuZgdQ5vWj": "1mp3d_0051", "HxpKQynjfin": "1mp3d_0052", "rqfALeAoiTq": "1mp3d_0053", "ARNzJeq3xxb": "1mp3d_0054", "RPmz2sHmrrY": "1mp3d_0055", "Vt2qJdWjCF2": "1mp3d_0056", "V2XKFyX4ASd": "1mp3d_0057", "dhjEzFoUFzH": "1mp3d_0058", "zsNo4HB9uLZ": "1mp3d_0059", "aayBHfsNo7d": "1mp3d_0060", "rPc6DW4iMge": "1mp3d_0061", "1LXtFkjw3qL": "1mp3d_0062", "fzynW3qQPVF": "1mp3d_0063", "e9zR4mvMWw7": "1mp3d_0064", "2n8kARJN3HM": "1mp3d_0065", "ur6pFq6Qu1A": "1mp3d_0066", "ac26ZMwG7aT": "1mp3d_0067", "YVUC4YcDtcY": "1mp3d_0068", "gTV8FGcVJC9": "1mp3d_0069", "pLe4wQe7qrG": "1mp3d_0070", "Z6MFQCViBuw": "1mp3d_0071", "VVfe2KiqLaN": "1mp3d_0072", "qoiz87JEwZ2": "1mp3d_0073", "JF19kD82Mey": "1mp3d_0074", "17DRP5sb8fy": "1mp3d_0075", "82sE5b5pLXE": "1mp3d_0076", "5q7pvUzZiYa": "1mp3d_0077", "29hnd4uzFmX": "1mp3d_0078", "EDJbREhghzL": "1mp3d_0079", "gxdoqLR6rwA": "1mp3d_0080", "D7G3Y4RVNrH": "1mp3d_0081", "x8F5xyUWy9e": "1mp3d_0082", "GdvgFV5R1Z5": "1mp3d_0083", "cV4RVeZvu5T": "1mp3d_0084", "8194nk5LbLH": "1mp3d_0085", "2t7WUuJeko7": "1mp3d_0086", "Pm6F8kyY3z2": "1mp3d_0087", "gZ6f7yhEvPG": "1mp3d_0088", "gYvKGZ5eRqb": "1mp3d_0089"} diff --git a/data_preparation/meta_data/mp3d_matrix.npy b/data_preparation/meta_data/mp3d_matrix.npy new file mode 100644 index 0000000..c9b12f7 Binary files /dev/null and b/data_preparation/meta_data/mp3d_matrix.npy differ diff --git a/data_preparation/meta_data/scans_axis_alignment_matrices.json b/data_preparation/meta_data/scans_axis_alignment_matrices.json new file mode 100644 index 0000000..671dd53 --- /dev/null +++ b/data_preparation/meta_data/scans_axis_alignment_matrices.json @@ -0,0 +1,27236 @@ +{ + "scene0000_00": [ + 0.945519, + 0.325568, + 0.0, + -5.38439, + -0.325568, + 0.945519, + 0.0, + -2.87178, + 0.0, + 0.0, + 1.0, + -0.06435, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0000_01": [ + 0.95882, + 0.284015, + 0.0, + -5.16934, + -0.284015, + 0.95882, + 0.0, + -2.98245, + 0.0, + 0.0, + 1.0, + -0.052345, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0000_02": [ + -0.398749, + 0.91706, + 0.0, + -2.50415, + -0.91706, + -0.398749, + 0.0, + 5.67627, + 0.0, + 0.0, + 1.0, + -0.046902, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0001_00": [ + 0.580703, + 0.814116, + 0.0, + -5.66755, + -0.814116, + 0.580703, + 0.0, + 1.48678, + 0.0, + 0.0, + 1.0, + -0.05707, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0001_01": [ + -0.882948, + 0.469472, + 0.0, + 0.396252, + -0.469472, + -0.882948, + 0.0, + 6.05949, + 0.0, + 0.0, + 1.0, + -0.087321, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0002_00": [ + 0.999657, + 0.026177, + 0.0, + -2.8414, + -0.026177, + 0.999657, + 0.0, + -2.65834, + 0.0, + 0.0, + 1.0, + -0.539597, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0002_01": [ + 0.522498, + 0.85264, + 0.0, + -3.94788, + -0.85264, + 0.522498, + 0.0, + 0.786508, + 0.0, + 0.0, + 1.0, + -0.733351, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0003_00": [ + -0.997564, + 0.069757, + 0.0, + 1.60319, + -0.069757, + -0.997564, + 0.0, + 1.62961, + 0.0, + 0.0, + 1.0, + -0.052278, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0003_01": [ + 0.043619, + 0.999048, + 0.0, + -2.62734, + -0.999048, + 0.043619, + 0.0, + 4.30462, + 0.0, + 0.0, + 1.0, + -0.07087, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0003_02": [ + -0.008727, + 0.999962, + 0.0, + -1.63426, + -0.999962, + -0.008727, + 0.0, + 1.67947, + 0.0, + 
0.0, + 1.0, + -0.076325, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0004_00": [ + -0.891006, + 0.453991, + 0.0, + 1.27552, + -0.453991, + -0.891006, + 0.0, + 6.30358, + 0.0, + 0.0, + 1.0, + -0.338165, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0005_00": [ + 0.317305, + 0.948324, + 0.0, + -3.46486, + -0.948324, + 0.317305, + 0.0, + 1.26827, + 0.0, + 0.0, + 1.0, + -0.068607, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0005_01": [ + 0.994522, + 0.104528, + 0.0, + -2.63402, + -0.104528, + 0.994522, + 0.0, + -4.63325, + 0.0, + 0.0, + 1.0, + -0.058952, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0006_00": [ + -0.99863, + 0.052336, + 0.0, + 2.04265, + -0.052336, + -0.99863, + 0.0, + 3.88955, + 0.0, + 0.0, + 1.0, + -0.077781, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0006_01": [ + -0.25038, + 0.968148, + 0.0, + -1.57049, + -0.968148, + -0.25038, + 0.0, + 4.27436, + 0.0, + 0.0, + 1.0, + -0.096316, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0006_02": [ + -0.207912, + 0.978148, + 0.0, + -2.95243, + -0.978148, + -0.207912, + 0.0, + 4.77065, + 0.0, + 0.0, + 1.0, + -0.161227, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0007_00": [ + -0.241922, + 0.970296, + 0.0, + -0.922999, + -0.970296, + -0.241922, + 0.0, + 4.7195, + 0.0, + 0.0, + 1.0, + -0.068226, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0008_00": [ + 0.333807, + 0.942641, + 0.0, + -6.62518, + -0.942641, + 0.333807, + 0.0, + 2.53049, + 0.0, + 0.0, + 1.0, + -0.142495, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0009_00": [ + 0.902585, + 0.430511, + 0.0, + -4.29384, + -0.430511, + 0.902585, + 0.0, + -2.92343, + 0.0, + 0.0, + 1.0, + -0.028379, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0009_01": [ + 0.997564, + 0.069757, + 0.0, + -2.50722, + -0.069757, + 0.997564, + 0.0, + -5.35697, + 0.0, + 0.0, + 1.0, + -0.032304, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0009_02": [ + 0.887011, + 0.461749, + 0.0, + -4.43988, + -0.461749, + 0.887011, + 0.0, + -3.33837, + 0.0, + 0.0, + 1.0, + -0.025404, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0010_00": [ + -0.694658, + 0.71934, + 0.0, + 0.24176, + -0.71934, + -0.694658, + 0.0, + 3.9996, + 0.0, + 0.0, + 1.0, + -0.088575, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0010_01": [ + -0.190809, + 0.981627, + 0.0, + -2.71972, + -0.981627, + -0.190809, + 0.0, + 3.40518, + 0.0, + 0.0, + 1.0, + -0.109657, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0011_00": [ + -0.996195, + 0.087156, + 0.0, + 3.88128, + -0.087156, + -0.996195, + 0.0, + 4.11908, + 0.0, + 0.0, + 1.0, + -0.071016, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0011_01": [ + 0.62932, + 0.777146, + 0.0, + -6.6701, + -0.777146, + 0.62932, + 0.0, + 0.462001, + 0.0, + 0.0, + 1.0, + -0.109085, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0012_00": [ + 0.608761, + 0.793353, + 0.0, + -3.72893, + -0.793353, + 0.608761, + 0.0, + 0.798026, + 0.0, + 0.0, + 1.0, + -0.053905, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0012_01": [ + 0.284015, + 0.95882, + 0.0, + -2.83136, + -0.95882, + 0.284015, + 0.0, + 1.5444, + 0.0, + 0.0, + 1.0, + -0.035698, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0012_02": [ + -0.96363, + 0.267238, + 0.0, + 1.32651, + -0.267238, + -0.96363, + 0.0, + 2.86353, + 0.0, + 0.0, + 1.0, + -0.070221, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0013_00": [ + 0.642788, + 0.766044, + 0.0, + -5.8017, + -0.766044, + 0.642788, + 0.0, + 0.312089, + 0.0, + 0.0, + 1.0, + -0.046306, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0013_01": [ + 0.920505, + 0.390731, + 0.0, + -3.86873, + -0.390731, + 0.920505, + 0.0, + -0.628762, + 0.0, + 0.0, + 1.0, + -0.039355, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0013_02": [ + -0.996195, + 0.087156, + 0.0, 
+ 3.85919, + -0.087156, + -0.996195, + 0.0, + 3.70133, + 0.0, + 0.0, + 1.0, + -0.040361, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0014_00": [ + -0.99863, + 0.052336, + 0.0, + 1.40606, + -0.052336, + -0.99863, + 0.0, + 1.34974, + 0.0, + 0.0, + 1.0, + -0.083647, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0015_00": [ + 0.857167, + 0.515038, + 0.0, + -5.43398, + -0.515038, + 0.857167, + 0.0, + -2.12834, + 0.0, + 0.0, + 1.0, + -0.097699, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0016_00": [ + 0.017452, + 0.999848, + 0.0, + -4.4641, + -0.999848, + 0.017452, + 0.0, + 4.32467, + 0.0, + 0.0, + 1.0, + -0.082149, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0016_01": [ + 0.034899, + 0.999391, + 0.0, + -4.37579, + -0.999391, + 0.034899, + 0.0, + 4.32975, + 0.0, + 0.0, + 1.0, + -0.058195, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0016_02": [ + -0.788011, + 0.615661, + 0.0, + 0.414391, + -0.615661, + -0.788011, + 0.0, + 5.39669, + 0.0, + 0.0, + 1.0, + -0.069013, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0017_00": [ + 0.507538, + 0.861629, + 0.0, + -4.30515, + -0.861629, + 0.507538, + 0.0, + 2.05783, + 0.0, + 0.0, + 1.0, + -0.055048, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0017_01": [ + -0.398749, + 0.91706, + 0.0, + -0.656755, + -0.91706, + -0.398749, + 0.0, + 3.18725, + 0.0, + 0.0, + 1.0, + -0.061985, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0017_02": [ + 0.580703, + 0.814116, + 0.0, + -3.87933, + -0.814116, + 0.580703, + 0.0, + 0.604396, + 0.0, + 0.0, + 1.0, + -0.093907, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0018_00": [ + 0.130526, + 0.991445, + 0.0, + -1.98982, + -0.991445, + 0.130526, + 0.0, + 2.00004, + 0.0, + 0.0, + 1.0, + -0.044625, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0019_00": [ + -0.358368, + 0.93358, + 0.0, + -1.77047, + -0.93358, + -0.358368, + 0.0, + 6.5075, + 0.0, + 0.0, + 1.0, + -0.072333, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0019_01": [ + 0.113203, + 0.993572, + 0.0, + -2.84335, + -0.993572, + 0.113203, + 0.0, + 3.90632, + 0.0, + 0.0, + 1.0, + -0.1414, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0020_00": [ + -0.446198, + 0.894934, + 0.0, + -0.535249, + -0.894934, + -0.446198, + 0.0, + 3.89464, + 0.0, + 0.0, + 1.0, + -0.125865, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0020_01": [ + -0.477159, + 0.878817, + 0.0, + -0.534648, + -0.878817, + -0.477159, + 0.0, + 3.63128, + 0.0, + 0.0, + 1.0, + -0.085791, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0021_00": [ + 0.843391, + 0.5373, + 0.0, + -5.43819, + -0.5373, + 0.843391, + 0.0, + -1.38429, + 0.0, + 0.0, + 1.0, + -0.046683, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0022_00": [ + 0.241922, + 0.970296, + 0.0, + -4.58691, + -0.970296, + 0.241922, + 0.0, + 4.66323, + 0.0, + 0.0, + 1.0, + -0.18811, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0022_01": [ + -0.069757, + 0.997564, + 0.0, + -2.04842, + -0.997564, + -0.069757, + 0.0, + 6.26363, + 0.0, + 0.0, + 1.0, + -0.132091, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0023_00": [ + 0.594823, + 0.803857, + 0.0, + -6.07569, + -0.803857, + 0.594823, + 0.0, + -0.777724, + 0.0, + 0.0, + 1.0, + -0.101227, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0024_00": [ + -0.573576, + 0.819152, + 0.0, + -1.03497, + -0.819152, + -0.573576, + 0.0, + 4.74518, + 0.0, + 0.0, + 1.0, + -0.035646, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0024_01": [ + -0.999848, + 0.017453, + 0.0, + 5.01727, + -0.017453, + -0.999848, + 0.0, + 2.97947, + 0.0, + 0.0, + 1.0, + -0.071109, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0024_02": [ + -0.814116, + 0.580703, + 0.0, + 1.07341, + -0.580703, + -0.814116, + 0.0, + 5.27823, + 0.0, + 0.0, + 1.0, + -0.063563, + 0.0, + 0.0, + 
+ 0.0, 1.0],
+ "scene0025_00": [0.707107, 0.707107, 0.0, -4.05043, -0.707107, 0.707107, 0.0, -0.052607, 0.0, 0.0, 1.0, -0.049991, 0.0, 0.0, 0.0, 1.0],
+ "scene0025_01": [0.173648, 0.984808, 0.0, -2.76211, -0.984808, 0.173648, 0.0, 3.89975, 0.0, 0.0, 1.0, -0.109843, 0.0, 0.0, 0.0, 1.0],
+ "scene0025_02": [0.267238, 0.96363, 0.0, -2.94491, -0.96363, 0.267238, 0.0, 2.21047, 0.0, 0.0, 1.0, -0.143068, 0.0, 0.0, 0.0, 1.0],
+ "scene0026_00": [-0.300706, 0.953717, 0.0, -1.53061, -0.953717, -0.300706, 0.0, 2.69337, 0.0, 0.0, 1.0, -0.098437, 0.0, 0.0, 0.0, 1.0],
+ "scene0027_00": [0.233445, 0.97237, 0.0, -3.02816, -0.97237, 0.233445, 0.0, 2.84719, 0.0, 0.0, 1.0, -0.106203, 0.0, 0.0, 0.0, 1.0],
+ "scene0027_01": [0.366501, 0.930418, 0.0, -3.87355, -0.930418, 0.366501, 0.0, 1.78887, 0.0, 0.0, 1.0, -0.099625, 0.0, 0.0, 0.0, 1.0],
+ "scene0027_02": [-0.642788, 0.766044, 0.0, 0.045767, -0.766044, -0.642788, 0.0, 5.02573, 0.0, 0.0, 1.0, -0.056236, 0.0, 0.0, 0.0, 1.0],
+ "scene0028_00": [-0.861629, 0.507538, 0.0, 1.19546, -0.507538, -0.861629, 0.0, 5.48218, 0.0, 0.0, 1.0, -0.064969, 0.0, 0.0, 0.0, 1.0],
+ "scene0029_00": [-0.0, 1.0, 0.0, -0.874899, -1.0, -0.0, 0.0, 0.987658, 0.0, 0.0, 1.0, -0.028911, 0.0, 0.0, 0.0, 1.0],
+ "scene0029_01": [-0.999048, 0.043619, 0.0, 0.973151, -0.043619, -0.999048, 0.0, 0.930997, 0.0, 0.0, 1.0, -0.025496, 0.0, 0.0, 0.0, 1.0],
+ "scene0029_02": [0.008727, 0.999962, 0.0, -0.88287, -0.999962, 0.008727, 0.0, 1.06956, 0.0, 0.0, 1.0, -0.023973, 0.0, 0.0, 0.0, 1.0],
+ "scene0030_00": [0.182236, 0.983255, 0.0, -3.48709, -0.983255, 0.182236, 0.0, 2.83609, 0.0, 0.0, 1.0, -0.046845, 0.0, 0.0, 0.0, 1.0],
+ "scene0030_01": [0.688355, 0.725374, 0.0, -5.20546, -0.725374, 0.688355, 0.0, 0.42691, 0.0, 0.0, 1.0, -0.04624, 0.0, 0.0, 0.0, 1.0],
+ "scene0030_02": [0.642788, 0.766044, 0.0, -5.27011, -0.766044, 0.642788, 0.0, 0.485111, 0.0, 0.0, 1.0, -0.045512, 0.0, 0.0, 0.0, 1.0],
+ "scene0031_00": [-0.078459, 0.996917, 0.0, -3.66279, -0.996917, -0.078459, 0.0, 4.53027, 0.0, 0.0, 1.0, -0.08462, 0.0, 0.0, 0.0, 1.0],
+ "scene0031_01": [0.034899, 0.999391, 0.0, -4.25131, -0.999391, 0.034899, 0.0, 3.77507, 0.0, 0.0, 1.0, -0.095474, 0.0, 0.0, 0.0, 1.0],
+ "scene0031_02": [-0.087156, 0.996195, 0.0, -2.18877, -0.996195, -0.087156, 0.0, 4.8058, 0.0, 0.0, 1.0, -0.097452, 0.0, 0.0, 0.0, 1.0],
+ "scene0032_00": [0.121869, 0.992546, 0.0, -1.8749, -0.992546, 0.121869, 0.0, 1.23172, 0.0, 0.0, 1.0, -0.03398, 0.0, 0.0, 0.0, 1.0],
+ "scene0032_01": [-0.951056, 0.309017, 0.0, 1.56168, -0.309017, -0.951056, 0.0, 2.41841, 0.0, 0.0, 1.0, -0.033528, 0.0, 0.0, 0.0, 1.0],
+ "scene0033_00": [-0.984808, 0.173648, 0.0, 2.81681, -0.173648, -0.984808, 0.0, 6.52149, 0.0, 0.0, 1.0, -0.138838, 0.0, 0.0, 0.0, 1.0],
+ "scene0034_00": [0.48481, 0.87462, 0.0, -3.57717, -0.87462, 0.48481, 0.0, 2.69182, 0.0, 0.0, 1.0, -0.093275, 0.0, 0.0, 0.0, 1.0],
+ "scene0034_01": [0.422618, 0.906308, 0.0, -3.90236, -0.906308, 0.422618, 0.0, 0.441602, 0.0, 0.0, 1.0, -0.088042, 0.0, 0.0, 0.0, 1.0],
+ "scene0034_02": [-0.078459, 0.996917, 0.0, -1.68935, -0.996917, -0.078459, 0.0, 2.23857, 0.0, 0.0, 1.0, -0.102415, 0.0, 0.0, 0.0, 1.0],
+ "scene0035_00": [0.333807, 0.942641, 0.0, -2.85347, -0.942641, 0.333807, 0.0, 1.34342, 0.0, 0.0, 1.0, -0.045685, 0.0, 0.0, 0.0, 1.0],
+ "scene0035_01": [0.182236, 0.983255, 0.0, -2.32176, -0.983255, 0.182236, 0.0, 1.37131, 0.0, 0.0, 1.0, -0.028791, 0.0, 0.0, 0.0, 1.0],
+ "scene0036_00": [0.766044, 0.642788, 0.0, -5.61602, -0.642788, 0.766044, 0.0, 0.273173, 0.0, 0.0, 1.0, -0.021682, 0.0, 0.0, 0.0, 1.0],
+ "scene0036_01": [-0.777146, 0.629321, 0.0, 0.802055, -0.629321, -0.777146, 0.0, 3.99625, 0.0, 0.0, 1.0, -0.080967, 0.0, 0.0, 0.0, 1.0],
+ "scene0037_00": [-0.580703, 0.814116, 0.0, -1.95843, -0.814116, -0.580703, 0.0, 6.02482, 0.0, 0.0, 1.0, -0.132667, 0.0, 0.0, 0.0, 1.0],
+ "scene0038_00": [-0.999962, 0.008727, 0.0, 4.48509, -0.008727, -0.999962, 0.0, 6.07833, 0.0, 0.0, 1.0, -0.043537, 0.0, 0.0, 0.0, 1.0],
+ "scene0038_01": [-0.798635, 0.601815, 0.0, 1.24888, -0.601815, -0.798635, 0.0, 7.18845, 0.0, 0.0, 1.0, -0.044891, 0.0, 0.0, 0.0, 1.0],
+ "scene0038_02": [-0.601815, 0.798635, 0.0, 0.542472, -0.798635, -0.601815, 0.0, 7.44977, 0.0, 0.0, 1.0, -0.050995, 0.0, 0.0, 0.0, 1.0],
+ "scene0039_00": [-0.743145, 0.669131, 0.0, 0.320075, -0.669131, -0.743145, 0.0, 3.27781, 0.0, 0.0, 1.0, -0.036228, 0.0, 0.0, 0.0, 1.0],
+ "scene0039_01": [-0.92388, 0.382683, 0.0, 1.15606, -0.382683, -0.92388, 0.0, 2.93196, 0.0, 0.0, 1.0, -0.030578, 0.0, 0.0, 0.0, 1.0],
+ "scene0040_00": [0.258819, 0.965926, 0.0, -3.90806, -0.965926, 0.258819, 0.0, 2.22862, 0.0, 0.0, 1.0, -0.078392, 0.0, 0.0, 0.0, 1.0],
+ "scene0040_01": [-0.870356, 0.492424, 0.0, 1.01776, -0.492424, -0.870356, 0.0, 4.43352, 0.0, 0.0, 1.0, -0.155821, 0.0, 0.0, 0.0, 1.0],
+ "scene0041_00": [-0.414693, 0.909961, 0.0, -2.56408, -0.909961, -0.414693, 0.0, 5.77645, 0.0, 0.0, 1.0, -0.07454, 0.0, 0.0, 0.0, 1.0],
+ "scene0041_01": [-0.199368, 0.979925, 0.0, -3.15848, -0.979925, -0.199368, 0.0, 4.12429, 0.0, 0.0, 1.0, -0.097119, 0.0, 0.0, 0.0, 1.0],
+ "scene0042_00": [-0.052336, 0.99863, 0.0, -1.74907, -0.99863, -0.052336, 0.0, 4.35082, 0.0, 0.0, 1.0, -0.074574, 0.0, 0.0, 0.0, 1.0],
+ "scene0042_01": [0.008727, 0.999962, 0.0, -1.61466, -0.999962, 0.008727, 0.0, 4.18248, 0.0, 0.0, 1.0, -0.030228, 0.0, 0.0, 0.0, 1.0],
+ "scene0042_02": [0.095846, 0.995396, 0.0, -2.11827, -0.995396, 0.095846, 0.0, 3.972, 0.0, 0.0, 1.0, -0.03825, 0.0, 0.0, 0.0, 1.0],
+ "scene0043_00": [0.737277, 0.67559, 0.0, -5.60442, -0.67559, 0.737277, 0.0, 1.25218, 0.0, 0.0, 1.0, -0.188572, 0.0, 0.0, 0.0, 1.0],
+ "scene0043_01": [-0.382683, 0.92388, 0.0, -1.41951, -0.92388, -0.382683, 0.0, 4.54484, 0.0, 0.0, 1.0, -0.046546, 0.0, 0.0, 0.0, 1.0],
+ "scene0044_00": [0.104528, 0.994522, 0.0, -1.88401, -0.994522, 0.104528, 0.0, 2.92361, 0.0, 0.0, 1.0, -1.27203, 0.0, 0.0, 0.0, 1.0],
+ "scene0044_01": [-0.017452, 0.999848, 0.0, -1.32652, -0.999848, -0.017452, 0.0, 2.94998, 0.0, 0.0, 1.0, -0.778638, 0.0, 0.0, 0.0, 1.0],
+ "scene0044_02": [0.026177, 0.999657, 0.0, -1.37925, -0.999657, 0.026177, 0.0, 2.86343, 0.0, 0.0, 1.0, -0.845694, 0.0, 0.0, 0.0, 1.0],
+ "scene0045_00": [-0.113203, 0.993572, 0.0, -3.51035, -0.993572, -0.113203, 0.0, 4.80789, 0.0, 0.0, 1.0, -0.049142, 0.0, 0.0, 0.0, 1.0],
+ "scene0045_01": [-0.999657, 0.026177, 0.0, 4.09741, -0.026177, -0.999657, 0.0, 4.74219, 0.0, 0.0, 1.0, -0.101646, 0.0, 0.0, 0.0, 1.0],
+ "scene0046_00": [-0.406737, 0.913545, 0.0, -0.9444, -0.913545, -0.406737, 0.0, 3.88165, 0.0, 0.0, 1.0, -0.072924, 0.0, 0.0, 0.0, 1.0],
+ "scene0046_01": [-0.165048, 0.986286, 0.0, -1.31958, -0.986286, -0.165048, 0.0, 3.1079, 0.0, 0.0, 1.0, -0.08168, 0.0, 0.0, 0.0, 1.0],
+ "scene0046_02": [-0.71325, 0.700909, 0.0, -0.066107, -0.700909, -0.71325, 0.0, 4.16496, 0.0, 0.0, 1.0, -0.083053, 0.0, 0.0, 0.0, 1.0],
+ "scene0047_00": [0.052336, 0.99863, 0.0, -2.7623, -0.99863, 0.052336, 0.0, 3.95611, 0.0, 0.0, 1.0, -0.042103, 0.0, 0.0, 0.0, 1.0],
+ "scene0048_00": [0.997564, 0.069757, 0.0, -2.4103, -0.069757, 0.997564, 0.0, -3.23033, 0.0, 0.0, 1.0, -0.021884, 0.0, 0.0, 0.0, 1.0],
+ "scene0048_01": [-0.987688, 0.156434, 0.0, 1.84724, -0.156434, -0.987688, 0.0, 4.72119, 0.0, 0.0, 1.0, -0.090264, 0.0, 0.0, 0.0, 1.0],
+ "scene0049_00": [-0.130526, 0.991445, 0.0, -1.62033, -0.991445, -0.130526, 0.0, 3.68528, 0.0, 0.0, 1.0, -0.068119, 0.0, 0.0, 0.0, 1.0],
+ "scene0050_00": [0.069757, 0.997564, 0.0, -2.71536, -0.997564, 0.069757, 0.0, 3.71767, 0.0, 0.0, 1.0, -0.084889, 0.0, 0.0, 0.0, 1.0],
+ "scene0050_01": [-0.061049, 0.998135, 0.0, -2.161, -0.998135, -0.061049, 0.0, 2.14481, 0.0, 0.0, 1.0, -0.153264, 0.0, 0.0, 0.0, 1.0],
+ "scene0050_02": [0.104528, 0.994522, 0.0, -2.54242, -0.994522, 0.104528, 0.0, 3.96531, 0.0, 0.0, 1.0, -0.15307, 0.0, 0.0, 0.0, 1.0],
+ "scene0051_00": [-0.953717, 0.300706, 0.0, 1.22832, -0.300706, -0.953717, 0.0, 5.2393, 0.0, 0.0, 1.0, -0.118472, 0.0, 0.0, 0.0, 1.0],
+ "scene0051_01": [-0.130526, 0.991445, 0.0, -3.11738, -0.991445, -0.130526, 0.0, 3.18591, 0.0, 0.0, 1.0, -0.222521, 0.0, 0.0, 0.0, 1.0],
+ "scene0051_02": [-0.987688, 0.156434, 0.0, 2.2363, -0.156434, -0.987688, 0.0, 2.99901, 0.0, 0.0, 1.0, -0.171765, 0.0, 0.0, 0.0, 1.0],
+ "scene0051_03": [0.267238, 0.96363, 0.0, -5.03472, -0.96363, 0.267238, 0.0, 1.25873, 0.0, 0.0, 1.0, -0.137077, 0.0, 0.0, 0.0, 1.0],
+ "scene0052_00": [-0.406737, 0.913545, 0.0, -1.35393, -0.913545, -0.406737, 0.0, 3.53148, 0.0, 0.0, 1.0, -0.078648, 0.0, 0.0, 0.0, 1.0],
+ "scene0052_01": [0.25038, 0.968148, 0.0, -3.32218, -0.968148, 0.25038, 0.0, 3.43055, 0.0, 0.0, 1.0, -0.075673, 0.0, 0.0, 0.0, 1.0],
+ "scene0052_02": [-0.026177, 0.999657, 0.0, -3.0945, -0.999657, -0.026177, 0.0, 3.25053, 0.0, 0.0, 1.0, -0.101159, 0.0, 0.0, 0.0, 1.0],
+ "scene0053_00": [0.043619, 0.999048, 0.0, -2.32041, -0.999048, 0.043619, 0.0, 1.88463, 0.0, 0.0, 1.0, -0.074808, 0.0, 0.0, 0.0, 1.0],
+ "scene0054_00": [0.66262, 0.748956, 0.0, -6.6846, -0.748956, 0.66262, 0.0, 0.447543, 0.0, 0.0, 1.0, -0.302899, 0.0, 0.0, 0.0, 1.0],
+ "scene0055_00": [-0.087156, 0.996195, 0.0, -1.4282, -0.996195, -0.087156, 0.0, 1.92033, 0.0, 0.0, 1.0, -0.067649, 0.0, 0.0, 0.0, 1.0],
+ "scene0055_01": [-0.087156, 0.996195, 0.0, -1.43876, -0.996195, -0.087156, 0.0, 1.97034, 0.0, 0.0, 1.0, -0.038045, 0.0, 0.0, 0.0, 1.0],
+ "scene0055_02": [-0.087156, 0.996195, 0.0, -1.41561, -0.996195, -0.087156, 0.0, 1.95024, 0.0, 0.0, 1.0, -0.056736, 0.0, 0.0, 0.0, 1.0],
+ "scene0056_00": [0.233445, 0.97237, 0.0, -4.30015, -0.97237, 0.233445, 0.0, 2.80161, 0.0, 0.0, 1.0, -0.121913, 0.0, 0.0, 0.0, 1.0],
+ "scene0056_01": [-0.981627, 0.190809, 0.0, 2.23394, -0.190809, -0.981627, 0.0, 4.92578, 0.0, 0.0, 1.0, -0.056888, 0.0, 0.0, 0.0, 1.0],
+ "scene0057_00": [0.121869, 0.992546, 0.0, -2.92948, -0.992546, 0.121869, 0.0, 3.42151, 0.0, 0.0, 1.0, -0.07956, 0.0, 0.0, 0.0, 1.0],
+ "scene0057_01": [-0.725374, 0.688354, 0.0, 0.290207, -0.688354, -0.725374, 0.0, 4.83777, 0.0, 0.0, 1.0, -0.06954, 0.0, 0.0, 0.0, 1.0],
+ "scene0058_00": [-0.382683, 0.92388, 0.0, -1.43617, -0.92388, -0.382683, 0.0, 3.69894, 0.0, 0.0, 1.0, -0.048434, 0.0, 0.0, 0.0, 1.0],
+ "scene0058_01": [0.087156, 0.996195, 0.0, -2.50753, -0.996195, 0.087156, 0.0, 3.15065, 0.0, 0.0, 1.0, -0.076828, 0.0, 0.0, 0.0, 1.0],
+ "scene0059_00": [0.936672, 0.350207, 0.0, -4.78862, -0.350207, 0.936672, 0.0, -2.58875, 0.0, 0.0, 1.0, -0.048405, 0.0, 0.0, 0.0, 1.0],
+ "scene0059_01": [-0.008727, 0.999962, 0.0, -2.30333, -0.999962, -0.008727, 0.0, 3.18181, 0.0, 0.0, 1.0, -0.081017, 0.0, 0.0, 0.0, 1.0],
+ "scene0059_02": [-0.902585, 0.430511, 0.0, 0.983921, -0.430511, -0.902585, 0.0, 4.92167, 0.0, 0.0, 1.0, -0.082146, 0.0, 0.0, 0.0, 1.0],
+ "scene0060_00": [-0.139173, 0.990268, 0.0, -1.43492, -0.990268, -0.139173, 0.0, 5.26964, 0.0, 0.0, 1.0, -0.112771, 0.0, 0.0, 0.0, 1.0],
+ "scene0060_01": [0.026177, 0.999657, 0.0, -3.70589, -0.999657, 0.026177, 0.0, 5.30222, 0.0, 0.0, 1.0, -0.030533, 0.0, 0.0, 0.0, 1.0],
+ "scene0061_00": [0.087156, 0.996195, 0.0, -3.94864, -0.996195, 0.087156, 0.0, 4.71681, 0.0, 0.0, 1.0, -0.082685, 0.0, 0.0, 0.0, 1.0],
+ "scene0061_01": [-0.026177, 0.999657, 0.0, -2.14348, -0.999657, -0.026177, 0.0, 2.70507, 0.0, 0.0, 1.0, -0.071428, 0.0, 0.0, 0.0, 1.0],
+ "scene0062_00": [0.317305, 0.948324, 0.0, -2.25227, -0.948324, 0.317305, 0.0, 1.88226, 0.0, 0.0, 1.0, -0.026918, 0.0, 0.0, 0.0, 1.0],
+ "scene0062_01": [-0.199368, 0.979925, 0.0, -0.642091, -0.979925, -0.199368, 0.0, 3.65371, 0.0, 0.0, 1.0, -0.029094, 0.0, 0.0, 0.0, 1.0],
+ "scene0062_02": [-0.707107, 0.707107, 0.0, 0.685667, -0.707107, -0.707107, 0.0, 3.05333, 0.0, 0.0, 1.0, -0.032658, 0.0, 0.0, 0.0, 1.0],
+ "scene0063_00": [0.887011, 0.461749, 0.0, -6.05128, -0.461749, 0.887011, 0.0, -1.53409, 0.0, 0.0, 1.0, -0.11359, 0.0, 0.0, 0.0, 1.0],
+ "scene0064_00": [0.999391, 0.034899, 0.0, -3.78343, -0.034899, 0.999391, 0.0, -6.15542, 0.0, 0.0, 1.0, -0.152881, 0.0, 0.0, 0.0, 1.0],
+ "scene0064_01": [-0.989016, 0.147809, 0.0, 2.33839, -0.147809, -0.989016, 0.0, 4.09956, 0.0, 0.0, 1.0, -0.124535, 0.0, 0.0, 0.0, 1.0],
+ "scene0065_00": [-0.580703, 0.814116, 0.0, -1.24761, -0.814116, -0.580703, 0.0, 3.12044, 0.0, 0.0, 1.0, -0.037417, 0.0, 0.0, 0.0, 1.0],
+ "scene0065_01": [0.936672, 0.350207, 0.0, -2.43242, -0.350207, 0.936672, 0.0, -1.13065, 0.0, 0.0, 1.0, -0.053514, 0.0, 0.0, 0.0, 1.0],
+ "scene0065_02": [0.829038, 0.559193, 0.0, -2.75642, -0.559193, 0.829038, 0.0, -0.574604, 0.0, 0.0, 1.0, -0.051784, 0.0, 0.0, 0.0, 1.0],
+ "scene0066_00": [-0.927184, 0.374606, 0.0, 2.82815, -0.374606, -0.927184, 0.0, 5.5917, 0.0, 0.0, 1.0, -0.083158, 0.0, 0.0, 0.0, 1.0],
+ "scene0067_00": [-0.104529, 0.994522, 0.0, -0.831416, -0.994522, -0.104529, 0.0, 3.1264, 0.0, 0.0, 1.0, -0.056328, 0.0, 0.0, 0.0, 1.0],
+ "scene0067_01": [-0.992546, 0.121869, 0.0, 0.884242, -0.121869, -0.992546, 0.0, 1.42294, 0.0, 0.0, 1.0, -0.026298, 0.0, 0.0, 0.0, 1.0],
+ "scene0067_02": [0.026177, 0.999657, 0.0, -0.952595, -0.999657, 0.026177, 0.0, 2.9435, 0.0, 0.0, 1.0, -0.058579, 0.0, 0.0, 0.0, 1.0],
+ "scene0068_00": [0.99863, 0.052336, 0.0, -3.40658, -0.052336, 0.99863, 0.0, -1.55335, 0.0, 0.0, 1.0, -0.117037, 0.0, 0.0, 0.0, 1.0],
+ "scene0068_01": [0.997564, 0.069757, 0.0, -1.52264, -0.069757, 0.997564, 0.0, -1.49961, 0.0, 0.0, 1.0, -0.066008, 0.0, 0.0, 0.0, 1.0],
+ "scene0069_00": [-0.052336, 0.99863, 0.0, -2.53816, -0.99863, -0.052336, 0.0, 4.58045, 0.0, 0.0, 1.0, -1.12428, 0.0, 0.0, 0.0, 1.0],
+ "scene0070_00": [-0.130526, 0.991445, 0.0, -1.6996, -0.991445, -0.130526, 0.0, 2.2097, 0.0, 0.0, 1.0, -0.067307, 0.0, 0.0, 0.0, 1.0],
+ "scene0071_00": [0.182236, 0.983255, 0.0, -3.89008, -0.983255, 0.182236, 0.0, 1.37627, 0.0, 0.0, 1.0, -0.124496, 0.0, 0.0, 0.0, 1.0],
+ "scene0072_00": [0.121869, 0.992546, 0.0, -1.48051, -0.992546, 0.121869, 0.0, 2.34423, 0.0, 0.0, 1.0, -0.082838, 0.0, 0.0, 0.0, 1.0],
+ "scene0072_01": [-0.026177, 0.999657, 0.0, -1.18168, -0.999657, -0.026177, 0.0, 1.7942, 0.0, 0.0, 1.0, -0.079995, 0.0, 0.0, 0.0, 1.0],
+ "scene0072_02": [-0.284015, 0.95882, 0.0, -1.01703, -0.95882, -0.284015, 0.0, 2.119, 0.0, 0.0, 1.0, -0.126875, 0.0, 0.0, 0.0, 1.0],
+ "scene0073_00": [-0.061049, 0.998135, 0.0, -1.46269, -0.998135, -0.061049, 0.0, 1.53242, 0.0, 0.0, 1.0, -0.055064, 0.0, 0.0, 0.0, 1.0],
+ "scene0073_01": [0.034899, 0.999391, 0.0, -1.69115, -0.999391, 0.034899, 0.0, 1.40642, 0.0, 0.0, 1.0, -0.041904, 0.0, 0.0, 0.0, 1.0],
+ "scene0073_02": [-0.0, 1.0, 0.0, -1.42551, -1.0, -0.0, 0.0, 1.53997, 0.0, 0.0, 1.0, -0.070876, 0.0, 0.0, 0.0, 1.0],
+ "scene0073_03": [-0.0, 1.0, 0.0, -1.36961, -1.0, -0.0, 0.0, 1.61455, 0.0, 0.0, 1.0, -0.053872, 0.0, 0.0, 0.0, 1.0],
+ "scene0074_00": [-0.199368, 0.979925, 0.0, -0.449465, -0.979925, -0.199368, 0.0, 2.18101, 0.0, 0.0, 1.0, -0.203169, 0.0, 0.0, 0.0, 1.0],
+ "scene0074_01": [-0.034899, 0.999391, 0.0, -0.619598, -0.999391, -0.034899, 0.0, 2.30675, 0.0, 0.0, 1.0, -0.195645, 0.0, 0.0, 0.0, 1.0],
+ "scene0074_02": [0.008727, 0.999962, 0.0, -0.704564, -0.999962, 0.008727, 0.0, 2.33488, 0.0, 0.0, 1.0, -0.221473, 0.0, 0.0, 0.0, 1.0],
+ "scene0075_00": [-0.017452, 0.999848, 0.0, -2.40322, -0.999848, -0.017452, 0.0, 3.72284, 0.0, 0.0, 1.0, -0.043656, 0.0, 0.0, 0.0, 1.0],
+ "scene0076_00": [-0.522498, 0.85264, 0.0, -1.34469, -0.85264, -0.522498, 0.0, 5.57816, 0.0, 0.0, 1.0, -0.064575, 0.0, 0.0, 0.0, 1.0],
+ "scene0077_00": [0.008727, 0.999962, 0.0, -1.88353, -0.999962, 0.008727, 0.0, 1.89696, 0.0, 0.0, 1.0, -0.092646, 0.0, 0.0, 0.0, 1.0],
+ "scene0077_01": [-0.190809, 0.981627, 0.0, -3.85328, -0.981627, -0.190809, 0.0, 5.30149, 0.0, 0.0, 1.0, -0.085364, 0.0, 0.0, 0.0, 1.0],
+ "scene0078_00": [-0.104529, 0.994522, 0.0, -2.663, -0.994522, -0.104529, 0.0, 3.20243, 0.0, 0.0, 1.0, -0.044086, 0.0, 0.0, 0.0, 1.0],
+ "scene0078_01": [-0.078459, 0.996917, 0.0, -2.76369, -0.996917, -0.078459, 0.0, 3.18386, 0.0, 0.0, 1.0, -0.024307, 0.0, 0.0, 0.0, 1.0],
+ "scene0078_02": [-0.113203, 0.993572, 0.0, -2.58785, -0.993572, -0.113203, 0.0, 2.97902, 0.0, 0.0, 1.0, -0.023365, 0.0, 0.0, 0.0, 1.0],
+ "scene0079_00": [-0.990268, 0.139173, 0.0, 2.48176, -0.139173, -0.990268, 0.0, 7.78043, 0.0, 0.0, 1.0, -0.07175, 0.0, 0.0, 0.0, 1.0],
+ "scene0079_01": [0.325568, 0.945519, 0.0, -3.92664, -0.945519, 0.325568, 0.0, 4.20239, 0.0, 0.0, 1.0, -0.043857, 0.0, 0.0, 0.0, 1.0],
+ "scene0080_00": [-0.0, 1.0, 0.0, -0.997136, -1.0, -0.0, 0.0, 2.45172, 0.0, 0.0, 1.0, -0.044227, 0.0, 0.0, 0.0, 1.0],
+ "scene0080_01": [0.104528, 0.994522, 0.0, -1.43918, -0.994522, 0.104528, 0.0, 2.31001, 0.0, 0.0, 1.0, -0.041348, 0.0, 0.0, 0.0, 1.0],
+ "scene0080_02": [0.061049, 0.998135, 0.0, -1.29606, -0.998135, 0.061049, 0.0, 1.38798, 0.0, 0.0, 1.0, -0.032576, 0.0, 0.0, 0.0, 1.0],
+ "scene0081_00": [0.894934, 0.446198, 0.0, -3.10462, -0.446198, 0.894934, 0.0, -2.77649, 0.0, 0.0, 1.0, -0.07867, 0.0, 0.0, 0.0, 1.0],
+ "scene0081_01": [-0.990268, 0.139173, 0.0, 3.41337, -0.139173, -0.990268, 0.0, 5.96633, 0.0, 0.0, 1.0, -0.121472, 0.0, 0.0, 0.0, 1.0],
+ "scene0081_02": [-0.99863, 0.052336, 0.0, 4.27139, -0.052336, -0.99863, 0.0, 5.94875, 0.0, 0.0, 1.0, -0.460141, 0.0, 0.0, 0.0, 1.0],
+ "scene0082_00": [-0.998135, 0.061049, 0.0, 1.91568, -0.061049, -0.998135, 0.0, 4.42624, 0.0, 0.0, 1.0, -0.535742, 0.0, 0.0, 0.0, 1.0],
+ "scene0083_00": [-0.113203, 0.993572, 0.0, -1.48134, -0.993572, -0.113203, 0.0, 1.81551, 0.0, 0.0, 1.0, -0.172324, 0.0, 0.0, 0.0, 1.0],
+ "scene0083_01": [-0.130526, 0.991445, 0.0, -1.55075, -0.991445, -0.130526, 0.0, 1.84377, 0.0, 0.0, 1.0, -0.138105, 0.0, 0.0, 0.0, 1.0],
+ "scene0084_00": [0.026177, 0.999657, 0.0, -2.77961, -0.999657, 0.026177, 0.0, 1.74237, 0.0, 0.0, 1.0, -0.028384, 0.0, 0.0, 0.0, 1.0],
+ "scene0084_01": [-0.97437, 0.224951, 0.0, 3.31617, -0.224951, -0.97437, 0.0, 3.24071, 0.0, 0.0, 1.0, -0.021563, 0.0, 0.0, 0.0, 1.0],
+ "scene0084_02": [0.986286, 0.165048, 0.0, -3.1944, -0.165048, 0.986286, 0.0, -2.82327, 0.0, 0.0, 1.0, -0.026556, 0.0, 0.0, 0.0, 1.0],
+ "scene0085_00": [0.104528, 0.994522, 0.0, -2.51219, -0.994522, 0.104528, 0.0, 2.77614, 0.0, 0.0, 1.0, -0.108557, 0.0, 0.0, 0.0, 1.0],
+ "scene0085_01": [-0.986286, 0.165048, 0.0, 1.88511, -0.165048, -0.986286, 0.0, 3.55773, 0.0, 0.0, 1.0, -0.152584, 0.0, 0.0, 0.0, 1.0],
+ "scene0086_00": [-0.829038, 0.559193, 0.0, -0.050899, -0.559193, -0.829038, 0.0, 5.33916, 0.0, 0.0, 1.0, -0.039508, 0.0, 0.0, 0.0, 1.0],
+ "scene0086_01": [-0.507538, 0.861629, 0.0, -0.875172, -0.861629, -0.507538, 0.0, 5.08348, 0.0, 0.0, 1.0, -0.035505, 0.0, 0.0, 0.0, 1.0],
+ "scene0086_02": [-0.580703, 0.814116, 0.0, -0.669858, -0.814116, -0.580703, 0.0, 5.37811, 0.0, 0.0, 1.0, -0.0436, 0.0, 0.0, 0.0, 1.0],
+ "scene0087_00": [0.986286, 0.165048, 0.0, -2.88518, -0.165048, 0.986286, 0.0, -3.42594, 0.0, 0.0, 1.0, -0.039596, 0.0, 0.0, 0.0, 1.0],
+ "scene0087_01": [-0.258819, 0.965926, 0.0, -1.96097, -0.965926, -0.258819, 0.0, 3.17832, 0.0, 0.0, 1.0, -0.105675, 0.0, 0.0, 0.0, 1.0],
+ "scene0087_02": [-0.043619, 0.999048, 0.0, -2.29632, -0.999048, -0.043619, 0.0, 2.30799, 0.0, 0.0, 1.0, -0.073834, 0.0, 0.0, 0.0, 1.0],
+ "scene0088_00": [0.477159, 0.878817, 0.0, -4.8832, -0.878817, 0.477159, 0.0, 1.51725, 0.0, 0.0, 1.0, -0.030037, 0.0, 0.0, 0.0, 1.0],
+ "scene0088_01": [0.398749, 0.91706, 0.0, -3.41454, -0.91706, 0.398749, 0.0, 1.77059, 0.0, 0.0, 1.0, -0.062874, 0.0, 0.0, 0.0, 1.0],
+ "scene0088_02": [-0.300706, 0.953717, 0.0, -2.55856, -0.953717, -0.300706, 0.0, 5.83871, 0.0, 0.0, 1.0, -0.095503, 0.0, 0.0, 0.0, 1.0],
+ "scene0088_03": [-0.782608, 0.622515, 0.0, 0.118658, -0.622515, -0.782608, 0.0, 5.53645, 0.0, 0.0, 1.0, -0.079202, 0.0, 0.0, 0.0, 1.0],
+ "scene0089_00": [0.034899, 0.999391, 0.0, -1.92383, -0.999391, 0.034899, 0.0, 1.91889, 0.0, 0.0, 1.0, -0.020687, 0.0, 0.0, 0.0, 1.0],
+ "scene0089_01": [0.078459, 0.996917, 0.0, -1.9655, -0.996917, 0.078459, 0.0, 2.04716, 0.0, 0.0, 1.0, -0.032612, 0.0, 0.0, 0.0, 1.0],
+ "scene0089_02": [-0.0, 1.0, 0.0, -1.75439, -1.0, -0.0, 0.0, 2.12378, 0.0, 0.0, 1.0, -0.015609, 0.0, 0.0, 0.0, 1.0],
+ "scene0090_00": [-0.147809, 0.989016, 0.0, -1.81327, -0.989016, -0.147809, 0.0, 2.23756, 0.0, 0.0, 1.0, -0.0781, 0.0, 0.0, 0.0, 1.0],
+ "scene0091_00": [-0.34202, 0.939693, 0.0, -1.05757, -0.939693, -0.34202, 0.0, 4.32218, 0.0, 0.0, 1.0, -0.06956, 0.0, 0.0, 0.0, 1.0],
+ "scene0092_00": [0.061049, 0.998135, 0.0, -2.12182, -0.998135, 0.061049, 0.0, 1.98556, 0.0, 0.0, 1.0, -0.128878, 0.0, 0.0, 0.0, 1.0],
+ "scene0092_01": [0.182236, 0.983255, 0.0, -2.6919, -0.983255, 0.182236, 0.0, 1.86917, 0.0, 0.0, 1.0, -0.110012, 0.0, 0.0, 0.0, 1.0],
+ "scene0092_02": [-0.927184, 0.374606, 0.0, 1.43997, -0.374606, -0.927184, 0.0, 3.27206, 0.0, 0.0, 1.0, -0.127205, 0.0, 0.0, 0.0, 1.0],
+ "scene0092_03": [-0.241922, 0.970296, 0.0, -1.29172, -0.970296, -0.241922, 0.0, 4.04498, 0.0, 0.0, 1.0, -0.11442, 0.0, 0.0, 0.0, 1.0],
+ "scene0092_04": [-0.333807, 0.942641, 0.0, -1.24016, -0.942641, -0.333807, 0.0, 4.47154, 0.0, 0.0, 1.0, -0.139929, 0.0, 0.0, 0.0, 1.0],
+ "scene0093_00": [0.199368, 0.979925, 0.0, -2.57948, -0.979925, 0.199368, 0.0, 2.12269, 0.0, 0.0, 1.0, -0.036185, 0.0, 0.0, 0.0, 1.0],
+ "scene0093_01": [0.25038, 0.968148, 0.0, -2.61912, -0.968148, 0.25038, 0.0, 1.23678, 0.0, 0.0, 1.0, -0.026763, 0.0, 0.0, 0.0, 1.0],
+ "scene0093_02": [0.104528, 0.994522, 0.0, -2.28841, -0.994522, 0.104528, 0.0, 1.51754, 0.0, 0.0, 1.0, -0.024655, 0.0, 0.0, 0.0, 1.0],
+ "scene0094_00": [-0.999048, 0.043619, 0.0, 4.21736, -0.043619, -0.999048, 0.0, 2.14912, 0.0, 0.0, 1.0, -0.055318, 0.0, 0.0, 0.0, 1.0],
+ "scene0095_00": [0.078459, 0.996917, 0.0, -2.75268, -0.996917, 0.078459, 0.0, 5.77958, 0.0, 0.0, 1.0, -0.056113, 0.0, 0.0, 0.0, 1.0],
+ "scene0095_01": [0.21644, 0.976296, 0.0, -3.45984, -0.976296, 0.21644, 0.0, 3.62579, 0.0, 0.0, 1.0, -0.066101, 0.0, 0.0, 0.0, 1.0],
+ "scene0096_00": [-0.999048, 0.043619, 0.0, 2.08481, -0.043619, -0.999048, 0.0, 1.78846, 0.0, 0.0, 1.0, -0.052161, 0.0, 0.0, 0.0, 1.0],
+ "scene0096_01": [0.843391, 0.5373, 0.0, -3.36659, -0.5373, 0.843391, 0.0, -0.524736, 0.0, 0.0, 1.0, -0.093807, 0.0, 0.0, 0.0, 1.0],
+ "scene0096_02": [-0.052336, 0.99863, 0.0, -1.99247, -0.99863, -0.052336, 0.0, 1.80299, 0.0, 0.0, 1.0, -0.061186, 0.0, 0.0, 0.0, 1.0],
+ "scene0097_00": [0.087156, 0.996195, 0.0, -1.59119, -0.996195, 0.087156, 0.0, 1.51071, 0.0, 0.0, 1.0, -0.045175, 0.0, 0.0, 0.0, 1.0],
+ "scene0098_00": [0.983255, 0.182236, 0.0, -2.36843, -0.182236, 0.983255, 0.0, -2.72772, 0.0, 0.0, 1.0, -0.077566, 0.0, 0.0, 0.0, 1.0],
+ "scene0098_01": [-0.999391, 0.034899, 0.0, 1.55629, -0.034899, -0.999391, 0.0, 2.65289, 0.0, 0.0, 1.0, -0.067588, 0.0, 0.0, 0.0, 1.0],
+ "scene0099_00": [0.358368, 0.93358, 0.0, -3.99769, -0.93358, 0.358368, 0.0, 1.71503, 0.0, 0.0, 1.0, -0.117617, 0.0, 0.0, 0.0, 1.0],
+ "scene0099_01": [0.241922, 0.970296, 0.0, -3.41367, -0.970296, 0.241922, 0.0, 1.57233, 0.0, 0.0, 1.0, -0.128454, 0.0, 0.0, 0.0, 1.0],
+ "scene0100_00": [0.300706, 0.953717, 0.0, -2.46315, -0.953717, 0.300706, 0.0, 2.0427, 0.0, 0.0, 1.0, -0.050825, 0.0, 0.0, 0.0, 1.0],
+ "scene0100_01": [-0.061049, 0.998135, 0.0, -1.16144, -0.998135, -0.061049, 0.0, 3.3312, 0.0, 0.0, 1.0, -0.05075, 0.0, 0.0, 0.0, 1.0],
+ "scene0100_02": [0.052336, 0.99863, 0.0, -1.6519, -0.99863, 0.052336, 0.0, 1.26096, 0.0, 0.0, 1.0, -0.048538, 0.0, 0.0, 0.0, 1.0],
+ "scene0101_00": [0.121869, 0.992546, 0.0, -3.00319, -0.992546, 0.121869, 0.0, 2.85261, 0.0, 0.0, 1.0, -0.130576, 0.0, 0.0, 0.0, 1.0],
+ "scene0101_01": [-0.121869, 0.992546, 0.0, -2.23913, -0.992546, -0.121869, 0.0, 3.6407, 0.0, 0.0, 1.0, -0.089626, 0.0, 0.0, 0.0, 1.0],
+ "scene0101_02": [0.139173, 0.990268, 0.0, -3.07943, -0.990268, 0.139173, 0.0, 3.03823, 0.0, 0.0, 1.0, -0.141604, 0.0, 0.0, 0.0, 1.0],
+ "scene0101_03": [0.026177, 0.999657, 0.0, -2.29643, -0.999657, 0.026177, 0.0, 2.98045, 0.0, 0.0, 1.0, -0.113632, 0.0, 0.0, 0.0, 1.0],
+ "scene0101_04": [-0.043619, 0.999048, 0.0, -2.24881, -0.999048, -0.043619, 0.0, 2.43461, 0.0, 0.0, 1.0, -0.206373, 0.0, 0.0, 0.0, 1.0],
+ "scene0101_05": [0.190809, 0.981627, 0.0, -3.20285, -0.981627, 0.190809, 0.0, 2.23175, 0.0, 0.0, 1.0, -0.101337, 0.0, 0.0, 0.0, 1.0],
+ "scene0102_00": [-0.325568, 0.945519, 0.0, -1.30527, -0.945519, -0.325568, 0.0, 3.84042, 0.0, 0.0, 1.0, -0.120212, 0.0, 0.0, 0.0, 1.0],
+ "scene0102_01": [-0.999657, 0.026177, 0.0, 1.83647, -0.026177, -0.999657, 0.0, 5.90956, 0.0, 0.0, 1.0, -0.096935, 0.0, 0.0, 0.0, 1.0],
+ "scene0103_00": [0.309017, 0.951057, 0.0, -2.13169, -0.951057, 0.309017, 0.0, 1.1227, 0.0, 0.0, 1.0, -0.039002, 0.0, 0.0, 0.0, 1.0],
+ "scene0103_01": [-0.953717, 0.300706, 0.0, 0.796951, -0.300706, -0.953717, 0.0, 3.42727, 0.0, 0.0, 1.0, -0.035854, 0.0, 0.0, 0.0, 1.0],
+ "scene0104_00": [-0.069757, 0.997564, 0.0, -1.32653, -0.997564, -0.069757, 0.0, 2.06111, 0.0, 0.0, 1.0, -0.036858, 0.0, 0.0, 0.0, 1.0],
+ "scene0105_00": [0.182236, 0.983255, 0.0, -1.52989, -0.983255, 0.182236, 0.0, 1.06135, 0.0, 0.0, 1.0, -0.042444, 0.0, 0.0, 0.0, 1.0],
+ "scene0105_01": [-0.999962, 0.008727, 0.0, 0.806793, -0.008727, -0.999962, 0.0, 1.33206, 0.0, 0.0, 1.0, -0.035305, 0.0, 0.0, 0.0, 1.0],
+ "scene0105_02": [-0.965926, 0.258819, 0.0, 0.685362, -0.258819, -0.965926, 0.0, 1.46726, 0.0, 0.0, 1.0, -0.077248, 0.0, 0.0, 0.0, 1.0],
+ "scene0106_00": [0.601815, 0.798636, 0.0, -6.60254, -0.798636, 0.601815, 0.0, 0.504755, 0.0, 0.0, 1.0, -0.332535, 0.0, 0.0, 0.0, 1.0],
+ "scene0106_01": [-0.788011, 0.615661, 0.0, 0.833097, -0.615661, -0.788011, 0.0, 6.86293, 0.0, 0.0, 1.0, -0.076139, 0.0, 0.0, 0.0, 1.0],
+ "scene0106_02": [0.996195, 0.087156, 0.0, -2.81105, -0.087156, 0.996195, 0.0, -3.44, 0.0, 0.0, 1.0, -0.092402, 0.0, 0.0, 0.0, 1.0],
+ "scene0107_00": [-0.043619, 0.999048, 0.0, -1.67152, -0.999048, -0.043619, 0.0, 4.49203, 0.0, 0.0, 1.0, -0.094614, 0.0, 0.0, 0.0, 1.0],
+ "scene0108_00": [-0.965926, 0.258819, 0.0, 1.49842, -0.258819, -0.965926, 0.0, 3.61158, 0.0, 0.0, 1.0, -0.097928, 0.0, 0.0, 0.0, 1.0],
+ "scene0109_00": [-0.284015, 0.95882, 0.0, -0.500467, -0.95882, -0.284015, 0.0, 4.38826, 0.0, 0.0, 1.0, -0.090327, 0.0, 0.0, 0.0, 1.0],
+ "scene0109_01": [0.267238, 0.96363, 0.0, -2.58799, -0.96363, 0.267238, 0.0, 2.38794, 0.0, 0.0, 1.0, -0.14112, 0.0, 0.0, 0.0, 1.0],
+ "scene0110_00": [0.788011, 0.615662, 0.0, -4.43752, -0.615662, 0.788011, 0.0, -0.17785, 0.0, 0.0, 1.0, -0.036931, 0.0, 0.0, 0.0, 1.0],
+ "scene0110_01": [0.999848, 0.017452, 0.0, -2.50106, -0.017452, 0.999848, 0.0, -1.85379, 0.0, 0.0, 1.0, -0.044488, 0.0, 0.0, 0.0, 1.0],
+ "scene0110_02": [-0.121869, 0.992546, 0.0, -1.77859, -0.992546, -0.121869, 0.0, 2.88055, 0.0, 0.0, 1.0, -0.027977, 0.0, 0.0, 0.0, 1.0],
+ "scene0111_00": [0.469472, 0.882948, 0.0, -3.54366, -0.882948, 0.469472, 0.0, 1.10113, 0.0, 0.0, 1.0, -0.159483, 0.0, 0.0, 0.0, 1.0],
+ "scene0111_01": [-0.688354, 0.725374, 0.0, 0.009799, -0.725374, -0.688354, 0.0, 4.06382, 0.0, 0.0, 1.0, -0.066651, 0.0, 0.0, 0.0, 1.0],
+ "scene0111_02": [-0.927184, 0.374606, 0.0, 1.26634, -0.374606, -0.927184, 0.0, 4.28824, 0.0, 0.0, 1.0, -0.129581, 0.0, 0.0, 0.0, 1.0],
+ "scene0112_00": [-0.078459, 0.996917, 0.0, -0.948728, -0.996917, -0.078459, 0.0, 2.2764, 0.0, 0.0, 1.0, -0.039329, 0.0, 0.0, 0.0, 1.0],
+ "scene0112_01": [-0.438371, 0.898794, 0.0, -0.371521, -0.898794, -0.438371, 0.0, 2.99474, 0.0, 0.0, 1.0, -0.0526, 0.0, 0.0, 0.0, 1.0],
+ "scene0112_02": [-0.008727, 0.999962, 0.0, -0.947847, -0.999962, -0.008727, 0.0, 1.92565, 0.0, 0.0, 1.0, -0.049793, 0.0, 0.0, 0.0, 1.0],
+ "scene0113_00": [0.99863, 0.052336, 0.0, -2.5659, -0.052336, 0.99863, 0.0, -2.34014, 0.0, 0.0, 1.0, -0.028225, 0.0, 0.0, 0.0, 1.0],
+ "scene0113_01": [0.224951, 0.97437, 0.0, -4.73312, -0.97437, 0.224951, 0.0, 1.37191, 0.0, 0.0, 1.0, -0.04944, 0.0, 0.0, 0.0, 1.0],
+ "scene0114_00": [0.793353, 0.608761, 0.0, -4.32874, -0.608761, 0.793353, 0.0, -0.403754, 0.0, 0.0, 1.0, -0.035388, 0.0, 0.0, 0.0, 1.0],
+ "scene0114_01": [0.515038, 0.857167, 0.0, -3.60193, -0.857167, 0.515038, 0.0, 1.61963, 0.0, 0.0, 1.0, -0.039448, 0.0, 0.0, 0.0, 1.0],
+ "scene0114_02": [-0.48481, 0.87462, 0.0, -0.944656, -0.87462, -0.48481, 0.0, 4.35097, 0.0, 0.0, 1.0, -0.036085, 0.0, 0.0, 0.0, 1.0],
+ "scene0115_00": [0.469472, 0.882948, 0.0, -4.83187, -0.882948, 0.469472, 0.0, 1.23065, 0.0, 0.0, 1.0, -0.062993, 0.0, 0.0, 0.0, 1.0],
+ "scene0115_01": [-0.224951, 0.97437, 0.0, -3.23036, -0.97437, -0.224951, 0.0, 5.97938, 0.0, 0.0, 1.0, -0.071103, 0.0, 0.0, 0.0, 1.0],
+ "scene0115_02": [0.953717, 0.300706, 0.0, -4.0012, -0.300706, 0.953717, 0.0, -2.52941, 0.0, 0.0, 1.0, -0.057141, 0.0, 0.0, 0.0, 1.0],
+ "scene0116_00": [0.99863, 0.052336, 0.0, -1.39016, -0.052336, 0.99863, 0.0, -2.51181, 0.0, 0.0, 1.0, -0.061271, 0.0, 0.0, 0.0, 1.0],
+ "scene0116_01": [0.999962, 0.008727, 0.0, -1.65568, -0.008727, 0.999962, 0.0, -2.17541, 0.0, 0.0, 1.0, -0.045757, 0.0, 0.0, 0.0, 1.0],
+ "scene0116_02": [-0.095846, 0.995396, 0.0, -2.93792, -0.995396, -0.095846, 0.0, 2.68162, 0.0, 0.0, 1.0, -0.067771, 0.0, 0.0, 0.0, 1.0],
+ "scene0117_00": [0.233445, 0.97237, 0.0, -2.42798, -0.97237, 0.233445, 0.0, 2.62255, 0.0, 0.0, 1.0, -0.079987, 0.0, 0.0, 0.0, 1.0],
+ "scene0118_00": [-0.233445, 0.97237, 0.0, -1.20333, -0.97237, -0.233445, 0.0, 5.30856, 0.0, 0.0, 1.0, -0.029965, 0.0, 0.0, 0.0, 1.0],
+ "scene0118_01": [-0.477159, 0.878817, 0.0, -0.84972, -0.878817, -0.477159, 0.0, 5.27899, 0.0, 0.0, 1.0, -0.068277, 0.0, 0.0, 0.0, 1.0],
+ "scene0118_02": [-0.649448, 0.760406, 0.0, -0.463841, -0.760406, -0.649448, 0.0, 6.03008, 0.0, 0.0, 1.0, -0.056729, 0.0, 0.0, 0.0, 1.0],
+ "scene0119_00": [0.052336, 0.99863, 0.0, -1.99873, -0.99863, 0.052336, 0.0, 3.38141, 0.0, 0.0, 1.0, -0.060906, 0.0, 0.0, 0.0, 1.0],
+ "scene0120_00": [0.374607, 0.927184, 0.0, -3.17399, -0.927184, 0.374607, 0.0, 1.78936, 0.0, 0.0, 1.0, -0.057526, 0.0, 0.0, 0.0, 1.0],
+ "scene0120_01": [0.551937, 0.833886, 0.0, -3.83159, -0.833886, 0.551937, 0.0, 1.12932, 0.0, 0.0, 1.0, -0.118017, 0.0, 0.0, 0.0, 1.0],
+ "scene0121_00": [0.999962, 0.008727, 0.0, -1.42371, -0.008727, 0.999962, 0.0, -1.58071, 0.0, 0.0, 1.0, -0.183582, 0.0, 0.0, 0.0, 1.0],
+ "scene0121_01": [1.0, 0.0, 0.0, -2.92109, 0.0, 1.0, 0.0, -1.77266, 0.0, 0.0, 1.0, -0.115823, 0.0, 0.0, 0.0, 1.0],
+ "scene0121_02": [-0.078459, 0.996917, 0.0, -1.11072, -0.996917, -0.078459, 0.0, 1.71272, 0.0, 0.0, 1.0, -0.18159, 0.0, 0.0, 0.0, 1.0],
+ "scene0122_00": [0.165048, 0.986286, 0.0, -1.76706, -0.986286, 0.165048, 0.0, 1.56308, 0.0, 0.0, 1.0, -0.085792, 0.0, 0.0, 0.0, 1.0],
+ "scene0122_01": [0.398749, 0.91706, 0.0, -2.3945, -0.91706, 0.398749, 0.0, 1.16856, 0.0, 0.0, 1.0, -0.046529, 0.0, 0.0, 0.0, 1.0],
+ "scene0123_00": [0.748956, 0.66262, 0.0, -6.89871, -0.66262, 0.748956, 0.0, -1.18431, 0.0, 0.0, 1.0, -0.084753, 0.0, 0.0, 0.0, 1.0],
+ "scene0123_01": [0.996195, 0.087156, 0.0, -4.20587, -0.087156, 0.996195, 0.0, -4.66132, 0.0, 0.0, 1.0, -0.056588, 0.0, 0.0, 0.0, 1.0],
+ "scene0123_02": [0.906308, 0.422618, 0.0, -5.29182, -0.422618, 0.906308, 0.0, -2.3143, 0.0, 0.0, 1.0, -0.062914, 0.0, 0.0, 0.0, 1.0],
+ "scene0124_00": [0.026177, 0.999657, 0.0, -1.25768, -0.999657, 0.026177, 0.0, 2.27758, 0.0, 0.0, 1.0, -0.093, 0.0, 0.0, 0.0, 1.0],
+ "scene0124_01": [-0.0, 1.0, 0.0, -1.22236, -1.0, -0.0, 0.0, 2.03279, 0.0, 0.0, 1.0, -0.097625, 0.0, 0.0, 0.0, 1.0],
+ "scene0125_00": [-0.838671, 0.544639, 0.0, 0.61745, -0.544639, -0.838671, 0.0, 6.72302, 0.0, 0.0, 1.0, -0.09839, 0.0, 0.0, 0.0, 1.0],
+ "scene0126_00": [-0.233445, 0.97237, 0.0, -1.13799, -0.97237, -0.233445, 0.0, 4.54095, 0.0, 0.0, 1.0, -0.042674, 0.0, 0.0, 0.0, 1.0],
+ "scene0126_01": [-0.284015, 0.95882, 0.0, -1.39547, -0.95882, -0.284015, 0.0, 3.56765, 0.0, 0.0, 1.0, -0.093632, 0.0, 0.0, 0.0, 1.0],
+ "scene0126_02": [-0.927184, 0.374606, 0.0, 1.18381, -0.374606, -0.927184, 0.0, 3.8033, 0.0, 0.0, 1.0, -0.053035, 0.0, 0.0, 0.0, 1.0],
+ "scene0127_00": [0.008727, 0.999962, 0.0, -2.06489, -0.999962, 0.008727, 0.0, 4.40658, 0.0, 0.0, 1.0, -0.032017, 0.0, 0.0, 0.0, 1.0],
+ "scene0127_01": [-0.224951, 0.97437, 0.0, -1.80122, -0.97437, -0.224951, 0.0, 3.3522, 0.0, 0.0, 1.0, -0.042429, 0.0, 0.0, 0.0, 1.0],
+ "scene0128_00": [-0.008727, 0.999962, 0.0, -1.61305, -0.999962, -0.008727, 0.0, 2.5643, 0.0, 0.0, 1.0, -0.068544, 0.0, 0.0, 0.0, 1.0],
+ "scene0129_00": [0.477159, 0.878817, 0.0, -4.96541, -0.878817, 0.477159, 0.0, 3.03368, 0.0, 0.0, 1.0, -0.035293, 0.0, 0.0, 0.0, 1.0],
+ "scene0130_00": [0.095846, 0.995396, 0.0, -1.68273, -0.995396, 0.095846, 0.0, 2.14208, 0.0, 0.0, 1.0, -0.043602, 0.0, 0.0, 0.0, 1.0],
+ "scene0131_00": [-0.529919, 0.848048, 0.0, -0.650883, -0.848048, -0.529919, 0.0, 4.62565, 0.0, 0.0, 1.0, -0.063226, 0.0, 0.0, 0.0, 1.0],
+ "scene0131_01": [0.267238, 0.96363, 0.0, -3.15017, -0.96363, 0.267238, 0.0, 3.27754, 0.0, 0.0, 1.0, -0.106277, 0.0, 0.0, 0.0, 1.0],
+ "scene0131_02": [0.983255, 0.182236, 0.0, -2.57717, -0.182236, 0.983255, 0.0, -3.05451, 0.0, 0.0, 1.0, -0.110415, 0.0, 0.0, 0.0, 1.0],
+ "scene0132_00": [-0.069757, 0.997564, 0.0, -2.17256, -0.997564, -0.069757, 0.0, 2.27371, 0.0, 0.0, 1.0, -0.072618, 0.0, 0.0, 0.0, 1.0],
+ "scene0132_01": [-0.743145, 0.669131, 0.0, -1.13533, -0.669131, -0.743145, 0.0, 5.05688, 0.0, 0.0, 1.0, -0.09575, 0.0, 0.0, 0.0, 1.0],
+ "scene0132_02": [-0.5, 0.866025, 0.0, -0.919414, -0.866025, -0.5, 0.0, 3.889, 0.0, 0.0, 1.0, -0.078303, 0.0, 0.0, 0.0, 1.0],
+ "scene0133_00": [0.366501, 0.930418, 0.0, -3.78981, -0.930418, 0.366501, 0.0, 4.27228, 0.0, 0.0, 1.0, -0.045996, 0.0, 0.0, 0.0, 1.0],
+ "scene0134_00": [-0.981627, 0.190809, 0.0, 1.12743, -0.190809, -0.981627, 0.0, 3.98428, 0.0, 0.0, 1.0, -0.063297, 0.0, 0.0, 0.0, 1.0],
+ "scene0134_01": [0.241922, 0.970296, 0.0, -4.58218, -0.970296, 0.241922, 0.0, 0.57072, 0.0, 0.0, 1.0, -0.056019, 0.0, 0.0, 0.0, 1.0],
+ "scene0134_02": [-0.809017, 0.587785, 0.0, 0.663329, -0.587785, -0.809017, 0.0, 2.71209, 0.0, 0.0, 1.0, -0.050772, 0.0, 0.0, 0.0, 1.0],
+ "scene0135_00": [-0.121869, 0.992546, 0.0, -3.62763, -0.992546, -0.121869, 0.0, 1.50247, 0.0, 0.0, 1.0, -0.05045, 0.0, 0.0, 0.0, 1.0],
+ "scene0136_00": [-0.21644, 0.976296, 0.0, -1.02222, -0.976296, -0.21644, 0.0, 2.34527, 0.0, 0.0, 1.0, -0.100462, 0.0, 0.0, 0.0, 1.0],
+ "scene0136_01": [-0.182235, 0.983255, 0.0, -1.16045, -0.983255, -0.182235, 0.0, 2.28016, 0.0, 0.0, 1.0, -0.182315, 0.0, 0.0, 0.0, 1.0],
+ "scene0136_02": [-0.061049, 0.998135, 0.0, -1.20564, -0.998135, -0.061049, 0.0, 1.88512, 0.0, 0.0, 1.0, -0.125291, 0.0, 0.0, 0.0, 1.0],
+ "scene0137_00": [0.999048, 0.043619, 0.0, -2.24313, -0.043619, 0.999048, 0.0, -4.19802, 0.0, 0.0, 1.0, -0.120295, 0.0, 0.0, 0.0, 1.0],
+ "scene0137_01": [-0.034899, 0.999391, 0.0, -2.0649, -0.999391, -0.034899, 0.0, 3.31042, 0.0, 0.0, 1.0, -0.121292, 0.0, 0.0, 0.0, 1.0],
+ "scene0137_02": [0.078459, 0.996917, 0.0, -2.46658, -0.996917, 0.078459, 0.0, 3.43075, 0.0, 0.0, 1.0, -0.184323, 0.0, 0.0, 0.0, 1.0],
+ "scene0138_00": [-0.453991, 0.891006, 0.0, -1.5295, -0.891006, -0.453991, 0.0, 5.20755, 0.0, 0.0, 1.0, -0.061192, 0.0, 0.0, 0.0, 1.0],
+ "scene0139_00": [0.156434, 0.987688, 0.0, -1.61152, -0.987688, 0.156434, 0.0, 1.34282, 0.0, 0.0, 1.0, -0.100993, 0.0, 0.0, 0.0, 1.0],
+ "scene0140_00": [-0.866025, 0.5, 0.0, 1.10199, -0.5, -0.866025, 0.0, 6.03599, 0.0, 0.0, 1.0, -0.10237, 0.0, 0.0, 0.0, 1.0],
+ "scene0140_01": [0.953717, 0.300706, 0.0, -4.47708, -0.300706, 0.953717, 0.0, -3.39621, 0.0, 0.0, 1.0, -0.109579, 0.0, 0.0, 0.0, 1.0],
+ "scene0141_00": [-0.829038, 0.559193, 0.0, 1.53347, -0.559193, -0.829038, 0.0, 4.08724, 0.0, 0.0, 1.0, -0.080639, 0.0, 0.0, 0.0, 1.0],
+ "scene0141_01": [-0.838671, 0.544639, 0.0, 0.838147, -0.544639, -0.838671, 0.0, 4.64534, 0.0, 0.0, 1.0, -0.09452, 0.0, 0.0, 0.0, 1.0],
+ "scene0141_02": [-0.034899, 0.999391, 0.0, -1.75115, -0.999391, -0.034899, 0.0, 2.55475, 0.0, 0.0, 1.0, -0.116976, 0.0, 0.0, 0.0, 1.0],
+ "scene0142_00": [-0.580703, 0.814116, 0.0, -1.08683, -0.814116, -0.580703, 0.0, 5.14334, 0.0, 0.0, 1.0, -0.150761, 0.0, 0.0, 0.0, 1.0],
+ "scene0142_01": [0.48481, 0.87462, 0.0, -4.517, -0.87462, 0.48481, 0.0, 1.08052, 0.0, 0.0, 1.0, -0.176718, 0.0, 0.0, 0.0, 1.0],
+ "scene0143_00": [0.622515, 0.782608, 0.0, -3.0462, -0.782608, 0.622515, 0.0, 0.447871, 0.0, 0.0, 1.0, -0.059078, 0.0, 0.0, 0.0, 1.0],
+ "scene0143_01": [-0.034899, 0.999391, 0.0, -3.35066, -0.999391, -0.034899, 0.0, 3.97262, 0.0, 0.0, 1.0, -0.257179, 0.0, 0.0, 0.0, 1.0],
+ "scene0143_02": [0.587785, 0.809017, 0.0, -4.81207, -0.809017, 0.587785, 0.0, 0.993772, 0.0, 0.0, 1.0, -0.127744, 0.0, 0.0, 0.0, 1.0],
+ "scene0144_00": [0.052336, 0.99863, 0.0, -2.47919, -0.99863, 0.052336, 0.0, 1.46545, 0.0, 0.0, 1.0, -0.054401, 0.0, 0.0, 0.0, 1.0],
+ "scene0144_01": [-0.026177, 0.999657, 0.0, -2.70557, -0.999657, -0.026177, 0.0, 2.44068, 0.0, 0.0, 1.0, -0.052812, 0.0, 0.0, 0.0, 1.0],
+ "scene0145_00": [0.945519, 0.325568, 0.0, -5.25071, -0.325568, 0.945519, 0.0, -2.46684, 0.0, 0.0, 1.0, -0.274742, 0.0, 0.0, 0.0, 1.0],
+ "scene0146_00": [0.026177, 0.999657, 0.0, -1.00847, -0.999657, 0.026177, 0.0, 1.40934, 0.0, 0.0, 1.0, -0.036592, 0.0, 0.0, 0.0, 1.0],
+ "scene0146_01": [0.139173, 0.990268, 0.0, -1.32319, -0.990268, 0.139173, 0.0, 1.29507, 0.0, 0.0, 1.0, -0.042078, 0.0, 0.0, 0.0, 1.0],
+ "scene0146_02": [-0.104529, 0.994522, 0.0, -2.80652, -0.994522, -0.104529, 0.0, 1.90254, 0.0, 0.0, 1.0, -0.04033, 0.0, 0.0, 0.0, 1.0],
+ "scene0147_00": [-0.156434, 0.987688, 0.0, -3.08605, -0.987688, -0.156434, 0.0, 4.03206, 0.0, 0.0, 1.0, -0.039847, 0.0, 0.0, 0.0, 1.0],
+ "scene0147_01": [-0.92388, 0.382683, 0.0, 1.36157, -0.382683, -0.92388, 0.0, 3.38771, 0.0, 0.0, 1.0, -0.055372, 0.0, 0.0, 0.0, 1.0],
+ "scene0148_00": [0.843391, 0.5373, 0.0, -5.33575, -0.5373, 0.843391, 0.0, -0.632503, 0.0, 0.0, 1.0, -0.047424, 0.0, 0.0, 0.0, 1.0],
+ "scene0149_00": [-0.382683, 0.92388, 0.0, -0.983743, -0.92388, -0.382683, 0.0, 3.0252, 0.0, 0.0, 1.0, -0.113086, 0.0, 0.0, 0.0, 1.0],
+ "scene0150_00": [0.422618, 0.906308, 0.0, -2.97331, -0.906308, 0.422618, 0.0, 2.22527, 0.0, 0.0, 1.0, -0.056288, 0.0, 0.0, 0.0, 1.0],
+ "scene0150_01": [0.803857, 0.594823, 0.0, -4.13181, -0.594823, 0.803857, 0.0, 0.560466, 0.0, 0.0, 1.0, -0.04132, 0.0, 0.0, 0.0, 1.0],
+ "scene0150_02": [0.173648, 0.984808, 0.0, -2.05166, -0.984808, 0.173648, 0.0, 2.57887, 0.0, 0.0, 1.0, -0.041293, 0.0, 0.0, 0.0, 1.0],
+ "scene0151_00": [0.559193, 0.829038, 0.0, -6.59772, -0.829038, 0.559193, 0.0, 0.197706, 0.0, 0.0, 1.0, -0.056178, 0.0, 0.0, 0.0, 1.0],
+ "scene0151_01": [-0.965926, 0.258819, 0.0, 2.50357, -0.258819, -0.965926, 0.0, 5.22019, 0.0, 0.0, 1.0, -0.045262, 0.0, 0.0, 0.0, 1.0],
+ "scene0152_00": [0.953717, 0.300706, 0.0, -4.17241, -0.300706, 0.953717, 0.0, -2.74545, 0.0, 0.0, 1.0, -0.080923, 0.0, 0.0, 0.0, 1.0],
+ "scene0152_01": [0.587785, 0.809017, 0.0, -8.44282, -0.809017, 0.587785, 0.0, 1.08188, 0.0, 0.0, 1.0, -0.413238, 0.0, 0.0, 0.0, 1.0],
+ "scene0152_02": [0.034899, 0.999391, 0.0, -5.62353, -0.999391, 0.034899, 0.0, 6.11943, 0.0, 0.0, 1.0, 0.0534, 0.0, 0.0, 0.0, 1.0],
+ "scene0153_00": [-0.0, 1.0, 0.0, -1.16887, -1.0, -0.0, 0.0, 2.5828, 0.0, 0.0, 1.0, -0.026067, 0.0, 0.0, 0.0, 1.0],
+ "scene0153_01": [1.0, 0.0, 0.0, -1.25567, 0.0, 1.0, 0.0, -2.94223, 0.0, 0.0, 1.0, -0.034203, 0.0, 0.0, 0.0, 1.0],
+ "scene0154_00": [0.430511, 0.902585, 0.0, -4.73, -0.902585, 0.430511, 0.0, 1.75085, 0.0, 0.0, 1.0, -0.155632, 0.0, 0.0, 0.0, 1.0],
+ "scene0155_00": [-0.898794, 0.438371, 0.0, 2.66515, -0.438371, -0.898794, 0.0, 5.92267, 0.0, 0.0, 1.0, -0.077111, 0.0, 0.0, 0.0, 1.0],
+ "scene0155_01": [-0.838671, 0.544639, 0.0, 1.50031, -0.544639, -0.838671, 0.0, 6.22225, 0.0, 0.0, 1.0, -0.072623, 0.0, 0.0, 0.0, 1.0],
+ "scene0155_02": [-0.469472, 0.882948, 0.0, -1.66413, -0.882948, -0.469472, 0.0, 6.08272, 0.0, 0.0, 1.0, -0.065726, 0.0, 0.0, 0.0, 1.0],
+ "scene0156_00": [-0.984808, 0.173648, 0.0, 3.12971, -0.173648, -0.984808, 0.0, 3.83886, 0.0, 0.0, 1.0, -0.085282, 0.0, 0.0, 0.0, 1.0],
+ "scene0157_00": [-0.087156, 0.996195, 0.0, -1.37922, -0.996195, -0.087156, 0.0, 2.02028, 0.0, 0.0, 1.0, -0.082894, 0.0, 0.0, 0.0, 1.0],
+ "scene0157_01": [-0.515038, 0.857167, 0.0, -0.899403, -0.857167, -0.515038, 0.0, 2.41607, 0.0, 0.0, 1.0, -0.030256, 0.0, 0.0, 0.0, 1.0],
+ "scene0158_00": [0.026177, 0.999657, 0.0, -1.35874, -0.999657, 0.026177, 0.0, 2.295, 0.0, 0.0, 1.0, -0.137452, 0.0, 0.0, 0.0, 1.0],
+ "scene0158_01": [0.113203, 0.993572, 0.0, -1.62772, -0.993572, 0.113203, 0.0, 2.22663, 0.0, 0.0, 1.0, -0.134599, 0.0, 0.0, 0.0, 1.0],
+ "scene0158_02": [0.034899, 0.999391, 0.0, -1.37591, -0.999391, 0.034899, 0.0, 2.23543, 0.0, 0.0, 1.0, -0.102845, 0.0, 0.0, 0.0, 1.0],
+ "scene0159_00": [0.406737, 0.913545, 0.0, -3.94965, -0.913545, 0.406737, 0.0, 2.40634, 0.0, 0.0, 1.0, -0.101086, 0.0, 0.0, 0.0, 1.0],
+ "scene0160_00": [0.891007, 0.453991, 0.0, -4.45705, -0.453991, 0.891007, 0.0, -1.03218, 0.0, 0.0, 1.0, -0.054909, 0.0, 0.0, 0.0, 1.0],
+ "scene0160_01": [-0.461749, 0.887011, 0.0, -2.40526, -0.887011, -0.461749, 0.0, 5.50273, 0.0, 0.0, 1.0, -0.043314, 0.0, 0.0, 0.0, 1.0],
+ "scene0160_02": [0.913545, 0.406737, 0.0, -4.73168, -0.406737, 0.913545, 0.0, -2.47702, 0.0, 0.0, 1.0, -0.04697, 0.0, 0.0, 0.0, 1.0],
+ "scene0160_03": [0.920505, 0.390731, 0.0, -3.99985, -0.390731, 0.920505, 0.0, -0.736771, 0.0, 0.0, 1.0, -0.033821, 0.0, 0.0, 0.0, 1.0],
+ "scene0160_04": [-0.97437, 0.224951, 0.0, 1.63353, -0.224951, -0.97437, 0.0, 2.73235, 0.0, 0.0, 1.0, -0.050229, 0.0, 0.0, 0.0, 1.0],
+ "scene0161_00": [0.422618, 0.906308, 0.0, -2.59295, -0.906308, 0.422618, 0.0, 0.547347, 0.0, 0.0, 1.0, -0.022703, 0.0, 0.0, 0.0, 1.0],
+ "scene0161_01": [0.991445, 0.130526, 0.0, -2.49889, -0.130526, 0.991445, 0.0, -1.05994, 0.0, 0.0, 1.0, -0.025338, 0.0, 0.0, 0.0, 1.0],
+ "scene0161_02": [0.461749, 0.887011, 0.0, -2.65303, -0.887011, 0.461749, 0.0, 0.331827, 0.0, 0.0, 1.0, -0.039139, 0.0, 0.0, 0.0, 1.0],
+ "scene0162_00": [0.398749, 0.91706, 0.0, -4.53289, -0.91706, 0.398749, 0.0, 2.90447, 0.0, 0.0, 1.0, -0.061532, 0.0, 0.0, 0.0, 1.0],
+ "scene0163_00": [-0.317305, 0.948324, 0.0, -2.46596, -0.948324, -0.317305, 0.0, 4.74427, 0.0, 0.0, 1.0, -0.068492, 0.0, 0.0, 0.0, 1.0],
+ "scene0163_01": [0.970296, 0.241922, 0.0, -3.29424, -0.241922, 0.970296, 0.0, -2.8864, 0.0, 0.0, 1.0, -0.057903, 0.0, 0.0, 0.0, 1.0],
+ "scene0164_00": [0.139173, 0.990268, 0.0, -1.92568, -0.990268, 0.139173, 0.0, 1.30502, 0.0, 0.0, 1.0, -0.06662, 0.0, 0.0, 0.0, 1.0],
+ "scene0164_01": [0.25038, 0.968148, 0.0, -2.37826, -0.968148, 0.25038, 0.0, 1.23302, 0.0, 0.0, 1.0, -0.131, 0.0, 0.0, 0.0, 1.0],
+ "scene0164_02": [0.788011, 0.615662, 0.0, -3.11829, -0.615662, 0.788011, 0.0, -0.474468, 0.0, 0.0, 1.0, -0.082067, 0.0, 0.0, 0.0, 1.0],
+ "scene0164_03": [-0.275637, 0.961262, 0.0, -1.223, -0.961262, -0.275637, 0.0, 2.52742, 0.0, 0.0, 1.0, -0.049294, 0.0, 0.0, 0.0, 1.0],
+ "scene0165_00": [0.422618, 0.906308, 0.0, -3.62681, -0.906308, 0.422618, 0.0, 3.22774, 0.0, 0.0, 1.0, -0.058109, 0.0, 0.0, 0.0, 1.0],
+ "scene0165_01": [-0.522498, 0.85264, 0.0, -1.1773, -0.85264, -0.522498, 0.0, 3.51551, 0.0, 0.0, 1.0, -0.094627, 0.0, 0.0, 0.0, 1.0],
+ "scene0165_02": [-0.241922, 0.970296, 0.0, -3.03719, -0.970296, -0.241922, 0.0, 3.08269, 0.0, 0.0, 1.0, -0.075517, 0.0, 0.0, 0.0, 1.0],
+ "scene0166_00": [0.99863, 0.052336, 0.0, -2.37829, -0.052336, 0.99863, 0.0, -2.09749, 0.0, 0.0, 1.0, -0.201176, 0.0, 0.0, 0.0, 1.0],
+ "scene0166_01": [-0.069757, 0.997564, 0.0, -1.64409, -0.997564, -0.069757, 0.0, 2.24877, 0.0, 0.0, 1.0, -0.137063, 0.0, 0.0, 0.0, 1.0],
+ "scene0166_02": [-0.398749, 0.91706, 0.0, -1.30049, -0.91706, -0.398749, 0.0, 3.14142, 0.0, 0.0, 1.0, -0.110313, 0.0, 0.0, 0.0, 1.0],
+ "scene0167_00": [-0.406737, 0.913545, 0.0, -0.873624, -0.913545, -0.406737, 0.0, 5.1253, 0.0, 0.0, 1.0, -0.096134, 0.0, 0.0, 0.0, 1.0],
+ "scene0168_00": [0.798636, 0.601815, 0.0, -4.81612, -0.601815, 0.798636, 0.0, -0.946926, 0.0, 0.0, 1.0, -0.055831, 0.0, 0.0, 0.0, 1.0],
+ "scene0168_01": [-0.173648, 0.984808, 0.0, -2.27383, -0.984808, -0.173648, 0.0, 3.40113, 0.0, 0.0, 1.0, -0.08192, 0.0, 0.0, 0.0, 1.0],
+ "scene0168_02": [0.91706, 0.398749, 0.0, -4.20675, -0.398749, 0.91706, 0.0, -1.92228, 0.0, 0.0, 1.0, -0.110201, 0.0, 0.0, 0.0, 1.0],
+ "scene0169_00": [0.430511, 0.902585, 0.0, -5.75701, -0.902585, 0.430511, 0.0, 2.43714, 0.0, 0.0, 1.0, -0.058867, 0.0, 0.0, 0.0, 1.0],
+ "scene0169_01": [0.580703, 0.814116, 0.0, -6.07745, -0.814116, 0.580703, 0.0, 0.983786, 0.0, 0.0, 1.0, -0.068742, 0.0, 0.0, 0.0, 1.0],
+ "scene0170_00": [0.165048, 0.986286, 0.0, -1.87529, -0.986286, 0.165048, 0.0, 1.92701, 0.0, 0.0, 1.0, -0.050419, 0.0, 0.0, 0.0, 1.0],
+ "scene0170_01": [0.043619, 0.999048, 0.0, -3.50844, -0.999048, 0.043619, 0.0, 1.35677, 0.0, 0.0, 1.0, -0.044631, 0.0, 0.0, 0.0, 1.0],
+ "scene0170_02": [0.069757, 0.997564, 0.0, -1.50381, -0.997564, 0.069757, 0.0, 1.34401, 0.0, 0.0, 1.0, -0.082108, 0.0, 0.0, 0.0, 1.0],
+ "scene0171_00": [-0.961262, 0.275637, 0.0, 1.66652, -0.275637, -0.961262, 0.0, 4.03301, 0.0, 0.0, 1.0, -0.048327, 0.0, 0.0, 0.0, 1.0],
+ "scene0171_01": [0.477159, 0.878817, 0.0, -6.19053, -0.878817, 0.477159, 0.0, 1.36858, 0.0, 0.0, 1.0, -0.070106, 0.0, 0.0, 0.0, 1.0],
+ "scene0172_00": [-0.999848, 0.017453, 0.0, 3.25246, -0.017453, -0.999848, 0.0, 1.88087, 0.0, 0.0, 1.0, -0.126, 0.0, 0.0, 0.0, 1.0],
+ "scene0172_01": [-0.095846, 0.995396, 0.0, -1.16474, -0.995396, -0.095846, 0.0, 3.68477, 0.0, 0.0, 1.0, -0.073965, 0.0, 0.0, 0.0, 1.0],
+ "scene0173_00": [0.422618, 0.906308, 0.0, -6.43633, -0.906308, 0.422618, 0.0, 3.62324, 0.0, 0.0, 1.0, -0.105978, 0.0, 0.0, 0.0, 1.0],
+ "scene0173_01": [-0.97237, 0.233445, 0.0, 3.33908, -0.233445, -0.97237, 0.0, 4.86479, 0.0, 0.0, 1.0, -0.092888, 0.0, 0.0, 0.0, 1.0],
+ "scene0173_02": [0.848048, 0.529919, 0.0, -5.28166, -0.529919, 0.848048, 0.0, -2.0476, 0.0, 0.0, 1.0, -0.087724, 0.0, 0.0, 0.0, 1.0],
+ "scene0174_00": [-0.374607, 0.927184, 0.0, -1.13249, -0.927184, -0.374607, 0.0, 2.218, 0.0, 0.0, 1.0, -0.072651, 0.0, 0.0, 0.0, 1.0],
+ "scene0174_01": [-0.147809, 0.989016, 0.0, -1.14971, -0.989016, -0.147809, 0.0, 1.93029, 0.0, 0.0, 1.0, -0.071425, 0.0, 0.0, 0.0, 1.0],
+ "scene0175_00": [0.034899, 0.999391, 0.0, -2.08639, -0.999391, 0.034899, 0.0, 3.86035, 0.0, 0.0, 1.0, -0.067646, 0.0, 0.0, 0.0, 1.0],
+ "scene0176_00": [0.656059, 0.75471, 0.0, -3.62717, -0.75471, 0.656059, 0.0, -0.289884, 0.0, 0.0, 1.0, -0.048698, 0.0, 0.0, 0.0, 1.0],
+ "scene0177_00": [-0.258819, 0.965926, 0.0, -1.02796, -0.965926, -0.258819, 0.0, 4.53776, 0.0, 0.0, 1.0, -0.063458, 0.0, 0.0, 0.0, 1.0],
+ "scene0177_01": [-0.71934, 0.694658, 0.0, 1.02686, -0.694658, -0.71934, 0.0, 5.00183, 0.0, 0.0, 1.0, -0.057554, 0.0, 0.0, 0.0, 1.0],
+ "scene0177_02": [0.121869, 0.992546, 0.0, -2.81039, -0.992546, 0.121869, 0.0, 4.12454, 0.0, 0.0, 1.0, -0.04868, 0.0, 0.0, 0.0, 1.0],
+ "scene0178_00": [0.121869, 0.992546, 0.0, -2.78325, -0.992546, 0.121869, 0.0, 2.7779, 0.0, 0.0, 1.0, -0.099616, 0.0, 0.0, 0.0, 1.0],
+ "scene0179_00": [0.017452, 0.999848, 0.0, -2.07846, -0.999848, 0.017452, 0.0, 2.57508, 0.0, 0.0, 1.0, -0.061005, 0.0, 0.0, 0.0, 1.0],
+ "scene0180_00": [-0.999962, 0.008727, 0.0, 3.55381, -0.008727, -0.999962, 0.0, 6.80403, 0.0, 0.0, 1.0, 0.087065, 0.0, 0.0, 0.0, 1.0],
+ "scene0181_00": [0.398749, 0.91706, 0.0, -5.10775, -0.91706, 0.398749, 0.0, 2.53263, 0.0, 0.0, 1.0, -0.13381, 0.0, 0.0, 0.0, 1.0],
+ "scene0181_01": [-0.241922, 0.970296, 0.0, -1.98004, -0.970296, -0.241922, 0.0, 4.20232, 0.0, 0.0, 1.0, -0.12787, 0.0, 0.0, 0.0, 1.0],
+ "scene0181_02": [-0.052336, 0.99863, 0.0, -2.09173, -0.99863, -0.052336, 0.0, 4.37177, 0.0, 0.0, 1.0, -0.081846, 0.0, 0.0, 0.0, 1.0],
+ "scene0181_03": [0.608761, 0.793353, 0.0, -4.21819, -0.793353, 0.608761, 0.0, 0.571628, 0.0, 0.0, 1.0, -0.060803, 0.0, 0.0, 0.0, 1.0],
+ "scene0182_00": [-0.017452, 0.999848, 0.0, -1.63836, -0.999848, -0.017452, 0.0, 2.06966, 0.0, 0.0, 1.0, -2.1533, 0.0, 0.0, 0.0, 1.0],
+ "scene0182_01": [-0.979925, 0.199368, 0.0, 1.43977, -0.199368, -0.979925, 0.0, 2.35993, 0.0, 0.0, 1.0, -1.72063, 0.0, 0.0, 0.0, 1.0],
+ "scene0182_02": [-0.052336, 0.99863, 0.0, -1.09066, -0.99863, -0.052336, 0.0, 1.85233, 0.0, 0.0, 1.0, -1.59702, 0.0, 0.0, 0.0, 1.0],
+ "scene0183_00": [-0.882948, 0.469472, 0.0, 0.247907, -0.469472, -0.882948, 0.0, 2.21436, 0.0, 0.0, 1.0, -0.021384, 0.0, 0.0, 0.0, 1.0],
+ "scene0184_00": [-0.987688, 0.156434, 0.0, 2.01713, -0.156434, -0.987688, 0.0, 2.75672, 0.0, 0.0, 1.0, -0.108672, 0.0, 0.0, 0.0, 1.0],
+ "scene0185_00": [-0.688354, 0.725374, 0.0, -1.06465, -0.725374, -0.688354, 0.0, 8.05953, 0.0, 0.0, 1.0, -0.073369, 0.0, 0.0, 0.0, 1.0],
+ "scene0186_00": [-0.642788, 0.766044, 0.0, -0.787929, -0.766044, -0.642788, 0.0, 6.08849, 0.0, 0.0, 1.0, -0.162458, 0.0, 0.0, 0.0, 1.0],
+ "scene0186_01": [-0.848048, 0.529919, 0.0, 0.802565, -0.529919, -0.848048, 0.0, 4.14053, 0.0, 0.0, 1.0, -0.043281, 0.0, 0.0, 0.0, 1.0],
+ "scene0187_00": [-0.793353, 0.608761, 0.0, 0.426157, -0.608761, -0.793353, 0.0, 7.0119, 0.0, 0.0, 1.0, -0.045369, 0.0, 0.0, 0.0, 1.0],
+ "scene0187_01": [-0.182235, 0.983255, 0.0, -3.70435, -0.983255, -0.182235, 0.0, 5.66573, 0.0, 0.0, 1.0, -0.040016, 0.0, 0.0, 0.0, 1.0],
+ "scene0188_00": [0.438371, 0.898794, 0.0, -4.04865, -0.898794, 0.438371, 0.0, 1.15994, 0.0, 0.0, 1.0, -0.155686, 0.0, 0.0, 0.0, 1.0],
+ "scene0189_00": [0.636078, 0.771625, 0.0, -9.98552, -0.771625, 0.636078, 0.0, 2.14094, 0.0, 0.0, 1.0, -0.084151, 0.0, 0.0, 0.0, 1.0],
+ "scene0190_00": [-0.130526, 0.991445, 0.0, -1.31791, -0.991445, -0.130526, 0.0, 2.14784, 0.0, 0.0, 1.0, -0.031849, 0.0, 0.0, 0.0, 1.0],
+ "scene0191_00": [-0.21644, 0.976296, 0.0, -1.01457, -0.976296, -0.21644, 0.0, 3.91808, 0.0, 0.0, 1.0, -0.070665, 0.0, 0.0, 0.0, 1.0],
+ "scene0191_01": [0.656059, 0.75471, 0.0, -4.79916, -0.75471, 0.656059, 0.0, -0.401519, 0.0, 0.0, 1.0, -0.076288, 0.0, 0.0, 0.0, 1.0],
+ "scene0191_02": [0.317305, 0.948324, 0.0, -3.08499, -0.948324, 0.317305, 0.0, 1.23554, 0.0, 0.0, 1.0, -0.07611, 0.0, 0.0, 0.0, 1.0],
+ "scene0192_00": [-0.190809, 0.981627, 0.0, -2.20428, -0.981627, -0.190809, 0.0, 3.55104, 0.0, 0.0, 1.0, -0.081263, 0.0, 0.0, 0.0, 1.0],
+ "scene0192_01": [0.026177, 0.999657, 0.0, -3.69782, -0.999657, 0.026177, 0.0, 3.60966, 0.0, 0.0, 1.0, -0.119706, 0.0, 0.0, 0.0, 1.0],
+ "scene0192_02": [0.580703, 0.814116, 0.0, -4.28904, -0.814116, 0.580703, 0.0, 1.50138, 0.0, 0.0, 1.0, -0.078344, 0.0, 0.0, 0.0, 1.0],
+ "scene0193_00": [-0.121869, 0.992546, 0.0, -1.85715, -0.992546, -0.121869, 0.0, 2.7391, 0.0, 0.0, 1.0, -0.029667, 0.0, 0.0, 0.0, 1.0],
+ "scene0193_01": [0.461749, 0.887011, 0.0, -2.90547, -0.887011, 0.461749, 0.0, 0.734007, 0.0, 0.0, 1.0, -0.023235, 0.0, 0.0, 0.0, 1.0],
+ "scene0194_00": [-0.104529, 0.994522, 0.0, -2.52267, -0.994522, -0.104529, 0.0, 3.11449, 0.0, 0.0, 1.0, -0.125777, 0.0, 0.0, 0.0, 1.0],
+ "scene0195_00": [0.382683, 0.92388, 0.0, -3.20054, -0.92388, 0.382683, 0.0, 2.35309, 0.0, 0.0, 1.0, -0.036924, 0.0, 0.0, 0.0, 1.0],
+ "scene0195_01": [0.406737, 0.913545, 0.0, -3.04474, -0.913545, 0.406737, 0.0, 2.32494, 0.0, 0.0, 1.0, -0.065296, 0.0, 0.0, 0.0, 1.0],
+ "scene0195_02": [0.551937, 0.833886, 0.0, -3.26842, -0.833886, 0.551937, 0.0, 1.47037, 0.0, 0.0, 1.0, -0.106542, 0.0, 0.0, 0.0, 1.0],
+ "scene0196_00": [0.061049, 0.998135, 0.0, -3.10306, -0.998135, 0.061049, 0.0, 3.77948, 0.0, 0.0, 1.0, -0.061373, 0.0, 0.0, 0.0, 1.0],
+ "scene0197_00": [0.948324, 0.317305, 0.0, -2.93448, -0.317305, 0.948324, 0.0, -3.01443, 0.0, 0.0, 1.0, -0.180137, 0.0, 0.0, 0.0, 1.0],
+ "scene0197_01": [0.258819, 0.965926, 0.0, -2.81257, -0.965926, 0.258819, 0.0, 3.50009, 0.0, 0.0, 1.0, -0.153855, 0.0, 0.0, 0.0, 1.0],
+ "scene0197_02": [0.998135, 0.061049, 0.0, -1.5273, -0.061049, 0.998135, 0.0, -4.28543, 0.0, 0.0, 1.0, -0.164012, 0.0, 0.0, 0.0, 1.0],
+ "scene0198_00": [-0.113203, 0.993572, 0.0, -0.974671, -0.993572, -0.113203, 0.0, 2.13294, 0.0, 0.0, 1.0, -0.046181, 0.0, 0.0, 0.0, 1.0],
+ "scene0199_00": [0.275637, 0.961262, 0.0, -3.88379, -0.961262, 0.275637, 0.0, 2.58449, 0.0, 0.0, 1.0, -0.089676, 0.0, 0.0, 0.0, 1.0],
+ "scene0200_00": [0.999848, 0.017452, 0.0, -2.15888, -0.017452, 0.999848, 0.0, -2.31909, 0.0, 0.0, 1.0, -0.018433, 0.0, 0.0, 0.0, 1.0],
+ "scene0200_01": [-0.97437, 0.224951, 0.0, 1.02195, -0.224951, -0.97437, 0.0, 2.63564, 0.0, 0.0, 1.0, -0.029748, 0.0, 0.0, 0.0, 1.0],
+ "scene0200_02": [-0.130526, 0.991445, 0.0, -2.3487, -0.991445, -0.130526, 0.0, 2.49319, 0.0, 0.0, 1.0, -0.048065, 0.0, 0.0, 0.0, 1.0],
+ "scene0201_00": [-0.986286, 0.165048, 0.0, 3.31181, -0.165048, -0.986286, 0.0, 4.60089, 0.0, 0.0, 1.0, -0.052875, 0.0, 0.0, 0.0, 1.0],
+ "scene0201_01": [0.857167, 0.515038, 0.0, -4.56996, -0.515038, 0.857167, 0.0, -1.2346, 0.0, 0.0, 1.0, -0.052159, 0.0, 0.0, 0.0, 1.0],
+ "scene0201_02": [0.382683, 0.92388, 0.0, -5.40408, -0.92388, 0.382683, 0.0, 2.40474, 0.0, 0.0, 1.0, -0.06413, 0.0, 0.0, 0.0, 1.0],
+ "scene0202_00": [0.358368, 0.93358, 0.0, -3.47039, -0.93358, 0.358368, 0.0, 2.5909, 0.0, 0.0, 1.0, -0.058003, 0.0, 0.0, 0.0, 1.0],
+ "scene0203_00": [-0.95882, 0.284015, 0.0, 1.58093, -0.284015, -0.95882, 0.0, 3.61876, 0.0, 0.0, 1.0, -0.091591, 0.0, 0.0, 0.0, 1.0],
+ "scene0203_01": [0.930418, 0.366501, 0.0, -5.59153, -0.366501, 0.930418, 0.0, -3.85579, 0.0, 0.0, 1.0, -0.101692, 0.0, 0.0, 0.0, 1.0],
+ "scene0203_02": [0.996195, 0.087156, 0.0, -2.84598, -0.087156, 0.996195, 0.0, -3.3868, 0.0, 0.0, 1.0, -0.171737, 0.0, 0.0, 0.0, 1.0],
+ "scene0204_00": [-0.861629, 0.507538, 0.0, 0.063496, -0.507538, -0.861629, 0.0, 5.13649, 0.0, 0.0, 1.0, -0.036898, 0.0, 0.0, 0.0, 1.0],
+ "scene0204_01": [0.358368, 0.93358, 0.0, -4.1491, -0.93358, 0.358368, 0.0, 2.84125, 0.0, 0.0, 1.0, -0.043522, 0.0, 0.0, 0.0, 1.0],
+ "scene0204_02": [0.979925, 0.199368, 0.0, -3.07076, -0.199368, 0.979925, 0.0, -3.3124, 0.0, 0.0, 1.0, -0.077926, 0.0, 0.0, 0.0, 1.0],
+ "scene0205_00": [0.008727, 0.999962, 0.0, -1.53534, -0.999962, 0.008727, 0.0, 1.57895, 0.0, 0.0, 1.0, -0.058982, 0.0, 0.0, 0.0, 1.0],
+ "scene0205_01": [0.999391, 0.034899, 0.0, -1.77839, -0.034899, 0.999391, 0.0, -3.84217, 0.0, 0.0, 1.0, -0.05875, 0.0, 0.0, 0.0, 1.0],
+ "scene0205_02": [0.997564, 0.069757, 0.0, -1.53502, -0.069757, 0.997564, 0.0, -3.84927, 0.0, 0.0, 1.0, -0.033707, 0.0, 0.0, 0.0, 1.0],
+ "scene0206_00": [-0.515038, 0.857167, 0.0, -0.659178, -0.857167, -0.515038, 0.0, 4.17203, 0.0, 0.0, 1.0, -0.125778, 0.0, 0.0, 0.0, 1.0],
+ "scene0206_01": [0.095846, 0.995396, 0.0, -2.70921, -0.995396, 0.095846, 0.0, 2.71341, 0.0, 0.0, 1.0, -0.114338, 0.0, 0.0, 0.0, 1.0],
+ "scene0206_02": [0.087156, 0.996195, 0.0, -2.71478, -0.996195, 0.087156, 0.0, 2.39786, 0.0, 0.0, 1.0, -0.124468, 0.0, 0.0, 0.0, 1.0],
+ "scene0207_00": [-0.5, 0.866025, 0.0, -0.590182, -0.866025, -0.5, 0.0, 6.23624, 0.0, 0.0, 1.0, -0.049081, 0.0, 0.0, 0.0, 1.0],
+ "scene0207_01": [-0.414693, 0.909961, 0.0, -0.861119, -0.909961, -0.414693, 0.0, 5.96906, 0.0, 0.0, 1.0, -0.067238, 0.0, 0.0, 0.0, 1.0],
+ "scene0207_02": [0.71934, 0.694658, 0.0, -6.40161, -0.694658, 0.71934, 0.0, -0.510878, 0.0, 0.0, 1.0, -0.058275, 0.0, 0.0, 0.0, 1.0],
+ "scene0208_00": [-0.961262, 0.275637, 0.0, 3.35371, -0.275637, -0.961262, 0.0, 6.83783, 0.0, 0.0, 1.0, -0.104764, 0.0, 0.0, 0.0, 1.0],
+ "scene0209_00": [-0.034899, 0.999391, 0.0, -1.74868, -0.999391, -0.034899, 0.0, 4.17096, 0.0, 0.0, 1.0, -0.154, 0.0, 0.0, 0.0, 1.0],
+ "scene0209_01": [-0.75471, 0.656059, 0.0, 0.191082, -0.656059, -0.75471, 0.0, 4.42378, 0.0, 0.0, 1.0, -0.11328, 0.0, 0.0, 0.0, 1.0],
+ "scene0209_02": [-0.34202, 0.939693, 0.0, -0.967551, -0.939693, -0.34202, 0.0, 5.51945, 0.0, 0.0, 1.0, -0.149979, 0.0, 0.0, 0.0, 1.0],
+ "scene0210_00": [-0.34202, 0.939693, 0.0, -1.02007, -0.939693, -0.34202, 0.0, 2.69059, 0.0, 0.0, 1.0, -0.081162, 0.0, 0.0, 0.0, 1.0],
+ "scene0210_01": [0.993572, 0.113203, 0.0, -1.62462, -0.113203, 0.993572, 0.0, -3.83241, 0.0, 0.0, 1.0, -0.079286, 0.0, 0.0, 0.0, 1.0],
+ "scene0211_00": [0.139173, 0.990268, 0.0, -2.12902, -0.990268, 0.139173, 0.0, 1.66443, 0.0, 0.0, 1.0, -0.040356, 0.0, 0.0, 0.0, 1.0],
+ "scene0211_01": [-0.173648, 0.984808, 0.0, -1.51873, -0.984808, -0.173648, 0.0, 2.45328, 0.0, 0.0, 1.0, -0.049972, 0.0, 0.0, 0.0, 1.0],
+ "scene0211_02": [0.970296, 0.241922, 0.0, -2.47336, -0.241922, 0.970296, 0.0, -1.87874, 0.0, 0.0, 1.0, -0.101186, 0.0, 0.0, 0.0, 1.0],
+ "scene0211_03": [-0.382683, 0.92388, 0.0, -1.21941, -0.92388, -0.382683, 0.0, 2.93776, 0.0, 0.0, 1.0, -0.04607, 0.0, 0.0, 0.0, 1.0],
+ "scene0212_00": [-0.5373, 0.843391, 0.0, -0.935564, -0.843391, -0.5373, 0.0, 4.94927, 0.0, 0.0, 1.0, -0.072673, 0.0, 0.0, 0.0, 1.0],
+ "scene0212_01": [-0.594823, 0.803857, 0.0, -0.964424, -0.803857, -0.594823, 0.0, 6.33058, 0.0, 0.0, 1.0, -0.046179, 0.0, 0.0, 0.0,
1.0 + ], + "scene0212_02": [ + 0.477159, + 0.878817, + 0.0, + -6.42682, + -0.878817, + 0.477159, + 0.0, + 1.71239, + 0.0, + 0.0, + 1.0, + -0.090016, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0213_00": [ + -0.390731, + 0.920505, + 0.0, + -1.68446, + -0.920505, + -0.390731, + 0.0, + 6.09395, + 0.0, + 0.0, + 1.0, + -0.294534, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0214_00": [ + 0.414693, + 0.909961, + 0.0, + -2.94734, + -0.909961, + 0.414693, + 0.0, + 1.57053, + 0.0, + 0.0, + 1.0, + -0.077962, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0214_01": [ + -0.995396, + 0.095846, + 0.0, + 2.3099, + -0.095846, + -0.995396, + 0.0, + 4.17234, + 0.0, + 0.0, + 1.0, + -0.062808, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0214_02": [ + -0.824126, + 0.566406, + 0.0, + 1.47632, + -0.566406, + -0.824126, + 0.0, + 4.90019, + 0.0, + 0.0, + 1.0, + -0.081005, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0215_00": [ + 0.34202, + 0.939693, + 0.0, + -4.63823, + -0.939693, + 0.34202, + 0.0, + 2.72663, + 0.0, + 0.0, + 1.0, + -0.067719, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0215_01": [ + 0.21644, + 0.976296, + 0.0, + -4.31609, + -0.976296, + 0.21644, + 0.0, + 2.41078, + 0.0, + 0.0, + 1.0, + -0.056318, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0216_00": [ + 1.0, + 0.0, + 0.0, + -1.77449, + 0.0, + 1.0, + 0.0, + -1.39107, + 0.0, + 0.0, + 1.0, + -0.03701, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0217_00": [ + -0.017452, + 0.999848, + 0.0, + -1.95665, + -0.999848, + -0.017452, + 0.0, + 2.63521, + 0.0, + 0.0, + 1.0, + -0.041526, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0218_00": [ + -0.906308, + 0.422618, + 0.0, + 1.67551, + -0.422618, + -0.906308, + 0.0, + 9.02071, + 0.0, + 0.0, + 1.0, + -0.088166, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0218_01": [ + 0.788011, + 0.615662, + 0.0, + -6.95254, + -0.615662, + 0.788011, + 0.0, + -0.845149, + 0.0, + 0.0, + 1.0, + -0.055455, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0219_00": [ + 0.748956, + 0.66262, + 0.0, + -4.03947, + -0.66262, + 0.748956, + 0.0, + 0.10766, + 0.0, + 0.0, + 1.0, + -0.023758, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0220_00": [ + -0.078459, + 0.996917, + 0.0, + -1.76305, + -0.996917, + -0.078459, + 0.0, + 5.51855, + 0.0, + 0.0, + 1.0, + -0.206312, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0220_01": [ + -0.866025, + 0.5, + 0.0, + 2.49506, + -0.5, + -0.866025, + 0.0, + 5.93557, + 0.0, + 0.0, + 1.0, + -0.137851, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0220_02": [ + -0.669131, + 0.743145, + 0.0, + 0.680006, + -0.743145, + -0.669131, + 0.0, + 5.56966, + 0.0, + 0.0, + 1.0, + -0.126828, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0221_00": [ + 0.382683, + 0.92388, + 0.0, + -4.61483, + -0.92388, + 0.382683, + 0.0, + 3.06122, + 0.0, + 0.0, + 1.0, + -0.115646, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0221_01": [ + -0.017452, + 0.999848, + 0.0, + -1.86031, + -0.999848, + -0.017452, + 0.0, + 4.28865, + 0.0, + 0.0, + 1.0, + -0.108188, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0222_00": [ + 0.034899, + 0.999391, + 0.0, + -1.98132, + -0.999391, + 0.034899, + 0.0, + 3.87444, + 0.0, + 0.0, + 1.0, + -0.097646, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0222_01": [ + 0.992546, + 0.121869, + 0.0, + -2.36153, + -0.121869, + 0.992546, + 0.0, + -2.78276, + 0.0, + 0.0, + 1.0, + -0.145216, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0223_00": [ + -0.325568, + 0.945519, + 0.0, + -1.17297, + -0.945519, + -0.325568, + 0.0, + 4.14962, + 0.0, + 0.0, + 1.0, + -0.052801, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0223_01": [ + 0.743145, + 0.669131, + 0.0, + -5.10929, + -0.669131, + 0.743145, + 0.0, + 0.097175, + 0.0, + 0.0, + 
1.0, + -0.03276, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0223_02": [ + -0.998135, + 0.061049, + 0.0, + 2.05895, + -0.061049, + -0.998135, + 0.0, + 2.72632, + 0.0, + 0.0, + 1.0, + -0.034442, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0224_00": [ + -0.968148, + 0.25038, + 0.0, + 0.664919, + -0.25038, + -0.968148, + 0.0, + 2.47148, + 0.0, + 0.0, + 1.0, + -0.03841, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0225_00": [ + -0.700909, + 0.71325, + 0.0, + -0.974614, + -0.71325, + -0.700909, + 0.0, + 6.59177, + 0.0, + 0.0, + 1.0, + -0.075135, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0226_00": [ + -0.087156, + 0.996195, + 0.0, + -4.14495, + -0.996195, + -0.087156, + 0.0, + 4.64251, + 0.0, + 0.0, + 1.0, + -0.073019, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0226_01": [ + -0.130526, + 0.991445, + 0.0, + -3.73985, + -0.991445, + -0.130526, + 0.0, + 4.95435, + 0.0, + 0.0, + 1.0, + -0.057038, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0227_00": [ + 0.866025, + 0.5, + 0.0, + -3.56243, + -0.5, + 0.866025, + 0.0, + -2.13335, + 0.0, + 0.0, + 1.0, + -0.034723, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0228_00": [ + 0.173648, + 0.984808, + 0.0, + -3.32325, + -0.984808, + 0.173648, + 0.0, + 2.60059, + 0.0, + 0.0, + 1.0, + -0.117013, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0229_00": [ + 0.649448, + 0.760406, + 0.0, + -4.32181, + -0.760406, + 0.649448, + 0.0, + 0.205678, + 0.0, + 0.0, + 1.0, + -0.0515, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0229_01": [ + -0.390731, + 0.920505, + 0.0, + -2.12876, + -0.920505, + -0.390731, + 0.0, + 2.98518, + 0.0, + 0.0, + 1.0, + -0.04304, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0229_02": [ + 0.430511, + 0.902585, + 0.0, + -3.77023, + -0.902585, + 0.430511, + 0.0, + 1.54437, + 0.0, + 0.0, + 1.0, + -0.047897, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0230_00": [ + 0.069757, + 0.997564, + 0.0, + -2.08957, + -0.997564, + 0.069757, + 0.0, + 1.87107, + 0.0, + 0.0, + 1.0, + -0.081353, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0231_00": [ + 0.91706, + 0.398749, + 0.0, + -6.21988, + -0.398749, + 0.91706, + 0.0, + -2.37449, + 0.0, + 0.0, + 1.0, + -0.093614, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0231_01": [ + 0.492424, + 0.870356, + 0.0, + -6.23895, + -0.870356, + 0.492424, + 0.0, + 1.67998, + 0.0, + 0.0, + 1.0, + -0.13443, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0231_02": [ + -0.069757, + 0.997564, + 0.0, + -3.82711, + -0.997564, + -0.069757, + 0.0, + 4.79048, + 0.0, + 0.0, + 1.0, + -0.148208, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0232_00": [ + -0.113203, + 0.993572, + 0.0, + -1.30617, + -0.993572, + -0.113203, + 0.0, + 2.05081, + 0.0, + 0.0, + 1.0, + -0.114, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0232_01": [ + -0.999048, + 0.043619, + 0.0, + 1.38858, + -0.043619, + -0.999048, + 0.0, + 2.12539, + 0.0, + 0.0, + 1.0, + -0.061307, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0232_02": [ + 0.965926, + 0.258819, + 0.0, + -2.21594, + -0.258819, + 0.965926, + 0.0, + -1.21257, + 0.0, + 0.0, + 1.0, + -0.049723, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0233_00": [ + 0.642788, + 0.766044, + 0.0, + -3.95232, + -0.766044, + 0.642788, + 0.0, + 0.336446, + 0.0, + 0.0, + 1.0, + -0.088015, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0233_01": [ + -0.743145, + 0.669131, + 0.0, + -0.182563, + -0.669131, + -0.743145, + 0.0, + 4.48724, + 0.0, + 0.0, + 1.0, + -0.117379, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0234_00": [ + 0.909961, + 0.414693, + 0.0, + -4.34815, + -0.414693, + 0.909961, + 0.0, + -3.14336, + 0.0, + 0.0, + 1.0, + -0.094837, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0235_00": [ + 0.233445, + 0.97237, + 0.0, + -3.19981, + 
-0.97237, + 0.233445, + 0.0, + 2.71242, + 0.0, + 0.0, + 1.0, + -0.051164, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0236_00": [ + 0.99863, + 0.052336, + 0.0, + -4.19151, + -0.052336, + 0.99863, + 0.0, + -3.91774, + 0.0, + 0.0, + 1.0, + -0.116026, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0236_01": [ + -0.0, + 1.0, + 0.0, + -4.09934, + -1.0, + -0.0, + 0.0, + 1.91823, + 0.0, + 0.0, + 1.0, + -0.080089, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0237_00": [ + -0.325568, + 0.945519, + 0.0, + -0.837008, + -0.945519, + -0.325568, + 0.0, + 1.70714, + 0.0, + 0.0, + 1.0, + -0.073943, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0237_01": [ + 0.968148, + 0.25038, + 0.0, + -1.90368, + -0.25038, + 0.968148, + 0.0, + -0.951876, + 0.0, + 0.0, + 1.0, + -0.119364, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0238_00": [ + 0.453991, + 0.891007, + 0.0, + -4.07586, + -0.891007, + 0.453991, + 0.0, + 2.3553, + 0.0, + 0.0, + 1.0, + -0.090034, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0238_01": [ + -0.008727, + 0.999962, + 0.0, + -1.56013, + -0.999962, + -0.008727, + 0.0, + 2.66506, + 0.0, + 0.0, + 1.0, + -0.08111, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0239_00": [ + 0.267238, + 0.96363, + 0.0, + -2.97638, + -0.96363, + 0.267238, + 0.0, + 2.84691, + 0.0, + 0.0, + 1.0, + -0.097258, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0239_01": [ + 0.113203, + 0.993572, + 0.0, + -2.47249, + -0.993572, + 0.113203, + 0.0, + 3.07533, + 0.0, + 0.0, + 1.0, + -0.073059, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0239_02": [ + -0.406737, + 0.913545, + 0.0, + -1.47128, + -0.913545, + -0.406737, + 0.0, + 3.31665, + 0.0, + 0.0, + 1.0, + -0.126619, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0240_00": [ + 0.017452, + 0.999848, + 0.0, + -1.37701, + -0.999848, + 0.017452, + 0.0, + 1.64995, + 0.0, + 0.0, + 1.0, + -0.058414, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0241_00": [ + 0.430511, + 0.902585, + 0.0, + -2.63539, + -0.902585, + 0.430511, + 0.0, + 0.848067, + 0.0, + 0.0, + 1.0, + -0.040154, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0241_01": [ + 0.976296, + 0.21644, + 0.0, + -2.14246, + -0.21644, + 0.976296, + 0.0, + -1.40033, + 0.0, + 0.0, + 1.0, + -0.022127, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0241_02": [ + 0.951057, + 0.309017, + 0.0, + -2.3704, + -0.309017, + 0.951057, + 0.0, + -1.25569, + 0.0, + 0.0, + 1.0, + -0.018576, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0242_00": [ + 0.507538, + 0.861629, + 0.0, + -2.78016, + -0.861629, + 0.507538, + 0.0, + 1.01386, + 0.0, + 0.0, + 1.0, + -0.034927, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0242_01": [ + -0.350207, + 0.936672, + 0.0, + -1.10846, + -0.936672, + -0.350207, + 0.0, + 2.267, + 0.0, + 0.0, + 1.0, + -0.059051, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0242_02": [ + -0.199368, + 0.979925, + 0.0, + -1.53786, + -0.979925, + -0.199368, + 0.0, + 2.1043, + 0.0, + 0.0, + 1.0, + -0.031114, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0243_00": [ + -0.139173, + 0.990268, + 0.0, + -1.26842, + -0.990268, + -0.139173, + 0.0, + 2.03643, + 0.0, + 0.0, + 1.0, + -0.082351, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0244_00": [ + 0.961262, + 0.275637, + 0.0, + -3.2659, + -0.275637, + 0.961262, + 0.0, + -1.38204, + 0.0, + 0.0, + 1.0, + -0.063999, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0244_01": [ + 0.374607, + 0.927184, + 0.0, + -4.15894, + -0.927184, + 0.374607, + 0.0, + 1.20737, + 0.0, + 0.0, + 1.0, + -0.102783, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0245_00": [ + -0.978148, + 0.207912, + 0.0, + 2.34808, + -0.207912, + -0.978148, + 0.0, + 4.76965, + 0.0, + 0.0, + 1.0, + -0.079828, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0246_00": [ 
+ 0.984808, + 0.173648, + 0.0, + -4.24729, + -0.173648, + 0.984808, + 0.0, + -2.192, + 0.0, + 0.0, + 1.0, + -0.177116, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0247_00": [ + 0.507538, + 0.861629, + 0.0, + -5.50299, + -0.861629, + 0.507538, + 0.0, + 2.33974, + 0.0, + 0.0, + 1.0, + -0.121843, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0247_01": [ + -0.061049, + 0.998135, + 0.0, + -2.18872, + -0.998135, + -0.061049, + 0.0, + 4.70945, + 0.0, + 0.0, + 1.0, + -0.14442, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0248_00": [ + -0.008727, + 0.999962, + 0.0, + -3.20681, + -0.999962, + -0.008727, + 0.0, + 3.12237, + 0.0, + 0.0, + 1.0, + -0.042774, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0248_01": [ + -0.861629, + 0.507538, + 0.0, + 1.39293, + -0.507538, + -0.861629, + 0.0, + 4.10089, + 0.0, + 0.0, + 1.0, + -0.064792, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0248_02": [ + -0.190809, + 0.981627, + 0.0, + -2.67324, + -0.981627, + -0.190809, + 0.0, + 3.83724, + 0.0, + 0.0, + 1.0, + -0.084382, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0249_00": [ + -0.681998, + 0.731354, + 0.0, + 0.862493, + -0.731354, + -0.681998, + 0.0, + 8.59665, + 0.0, + 0.0, + 1.0, + -0.087624, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0250_00": [ + -0.34202, + 0.939693, + 0.0, + -0.750564, + -0.939693, + -0.34202, + 0.0, + 5.5914, + 0.0, + 0.0, + 1.0, + -0.060418, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0250_01": [ + -0.936672, + 0.350207, + 0.0, + 2.02512, + -0.350207, + -0.936672, + 0.0, + 6.3345, + 0.0, + 0.0, + 1.0, + -0.057076, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0250_02": [ + -0.156434, + 0.987688, + 0.0, + -0.956119, + -0.987688, + -0.156434, + 0.0, + 5.51549, + 0.0, + 0.0, + 1.0, + -0.059574, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0251_00": [ + -0.622515, + 0.782608, + 0.0, + -1.26712, + -0.782608, + -0.622515, + 0.0, + 6.79866, + 0.0, + 0.0, + 1.0, + -0.374548, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0252_00": [ + 0.026177, + 0.999657, + 0.0, + -3.42741, + -0.999657, + 0.026177, + 0.0, + 2.05628, + 0.0, + 0.0, + 1.0, + -0.086844, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0253_00": [ + 0.147809, + 0.989016, + 0.0, + -3.2421, + -0.989016, + 0.147809, + 0.0, + 2.79282, + 0.0, + 0.0, + 1.0, + -0.344997, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0254_00": [ + 0.979925, + 0.199368, + 0.0, + -3.39505, + -0.199368, + 0.979925, + 0.0, + -3.6404, + 0.0, + 0.0, + 1.0, + -0.100367, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0254_01": [ + -0.382683, + 0.92388, + 0.0, + -2.21318, + -0.92388, + -0.382683, + 0.0, + 5.93124, + 0.0, + 0.0, + 1.0, + -0.124147, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0255_00": [ + 0.026177, + 0.999657, + 0.0, + -1.83382, + -0.999657, + 0.026177, + 0.0, + 1.48743, + 0.0, + 0.0, + 1.0, + -0.091824, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0255_01": [ + -0.043619, + 0.999048, + 0.0, + -1.73379, + -0.999048, + -0.043619, + 0.0, + 1.73647, + 0.0, + 0.0, + 1.0, + -0.052235, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0255_02": [ + 0.104528, + 0.994522, + 0.0, + -2.46517, + -0.994522, + 0.104528, + 0.0, + 1.40913, + 0.0, + 0.0, + 1.0, + -0.045001, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0256_00": [ + -0.275637, + 0.961262, + 0.0, + -3.35431, + -0.961262, + -0.275637, + 0.0, + 2.60144, + 0.0, + 0.0, + 1.0, + -0.038708, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0256_01": [ + -0.078459, + 0.996917, + 0.0, + -3.46376, + -0.996917, + -0.078459, + 0.0, + 1.55552, + 0.0, + 0.0, + 1.0, + -0.032672, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0256_02": [ + -0.130526, + 0.991445, + 0.0, + -1.91908, + -0.991445, + -0.130526, + 0.0, + 3.77516, + 0.0, + 
0.0, + 1.0, + -0.072171, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0257_00": [ + 0.999962, + 0.008727, + 0.0, + -2.88777, + -0.008727, + 0.999962, + 0.0, + -4.44731, + 0.0, + 0.0, + 1.0, + -0.224865, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0258_00": [ + 0.998135, + 0.061049, + 0.0, + -1.30985, + -0.061049, + 0.998135, + 0.0, + -4.0578, + 0.0, + 0.0, + 1.0, + -0.06908, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0259_00": [ + 0.920505, + 0.390731, + 0.0, + -4.71278, + -0.390731, + 0.920505, + 0.0, + -3.45373, + 0.0, + 0.0, + 1.0, + -0.10063, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0259_01": [ + -0.999657, + 0.026177, + 0.0, + 2.53967, + -0.026177, + -0.999657, + 0.0, + 5.46302, + 0.0, + 0.0, + 1.0, + -0.109413, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0260_00": [ + -0.026177, + 0.999657, + 0.0, + -2.06556, + -0.999657, + -0.026177, + 0.0, + 3.33965, + 0.0, + 0.0, + 1.0, + -0.087693, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0260_01": [ + -0.833886, + 0.551937, + 0.0, + -0.267384, + -0.551937, + -0.833886, + 0.0, + 2.87037, + 0.0, + 0.0, + 1.0, + -0.048557, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0260_02": [ + 0.147809, + 0.989016, + 0.0, + -1.36082, + -0.989016, + 0.147809, + 0.0, + 1.16161, + 0.0, + 0.0, + 1.0, + -0.051581, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0261_00": [ + 0.008727, + 0.999962, + 0.0, + -1.69806, + -0.999962, + 0.008727, + 0.0, + 2.559, + 0.0, + 0.0, + 1.0, + -0.112328, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0261_01": [ + 0.999657, + 0.026177, + 0.0, + -2.81513, + -0.026177, + 0.999657, + 0.0, + -2.48536, + 0.0, + 0.0, + 1.0, + -0.117588, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0261_02": [ + 0.008727, + 0.999962, + 0.0, + -1.38144, + -0.999962, + 0.008727, + 0.0, + 1.68357, + 0.0, + 0.0, + 1.0, + -0.184512, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0261_03": [ + -0.008727, + 0.999962, + 0.0, + -1.32785, + -0.999962, + -0.008727, + 0.0, + 1.76239, + 0.0, + 0.0, + 1.0, + -0.18775, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0262_00": [ + -0.284015, + 0.95882, + 0.0, + -0.455565, + -0.95882, + -0.284015, + 0.0, + 4.5745, + 0.0, + 0.0, + 1.0, + -0.190829, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0262_01": [ + -0.515038, + 0.857167, + 0.0, + 0.705694, + -0.857167, + -0.515038, + 0.0, + 5.64088, + 0.0, + 0.0, + 1.0, + -0.058602, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0263_00": [ + 0.139173, + 0.990268, + 0.0, + -2.35294, + -0.990268, + 0.139173, + 0.0, + 1.62616, + 0.0, + 0.0, + 1.0, + -0.057331, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0263_01": [ + -0.043619, + 0.999048, + 0.0, + -1.9827, + -0.999048, + -0.043619, + 0.0, + 3.62009, + 0.0, + 0.0, + 1.0, + -0.076409, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0264_00": [ + 0.707107, + 0.707107, + 0.0, + -4.63519, + -0.707107, + 0.707107, + 0.0, + -1.21662, + 0.0, + 0.0, + 1.0, + -0.082364, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0264_01": [ + 0.978148, + 0.207912, + 0.0, + -2.97763, + -0.207912, + 0.978148, + 0.0, + -3.9304, + 0.0, + 0.0, + 1.0, + -0.081372, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0264_02": [ + 0.182236, + 0.983255, + 0.0, + -3.73888, + -0.983255, + 0.182236, + 0.0, + 1.17942, + 0.0, + 0.0, + 1.0, + -0.178448, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0265_00": [ + 0.017452, + 0.999848, + 0.0, + -1.21913, + -0.999848, + 0.017452, + 0.0, + 1.17246, + 0.0, + 0.0, + 1.0, + -0.041979, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0265_01": [ + 0.026177, + 0.999657, + 0.0, + -1.32941, + -0.999657, + 0.026177, + 0.0, + 1.18238, + 0.0, + 0.0, + 1.0, + -0.029123, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0265_02": [ + 0.052336, + 0.99863, + 
0.0, + -1.33586, + -0.99863, + 0.052336, + 0.0, + 1.70383, + 0.0, + 0.0, + 1.0, + -0.053795, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0266_00": [ + 0.292372, + 0.956305, + 0.0, + -4.63831, + -0.956305, + 0.292372, + 0.0, + 2.30935, + 0.0, + 0.0, + 1.0, + -0.097438, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0266_01": [ + 0.814116, + 0.580703, + 0.0, + -5.81996, + -0.580703, + 0.814116, + 0.0, + -1.79319, + 0.0, + 0.0, + 1.0, + -0.085049, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0267_00": [ + 0.694658, + 0.71934, + 0.0, + -4.28242, + -0.71934, + 0.694658, + 0.0, + -0.317042, + 0.0, + 0.0, + 1.0, + -0.103809, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0268_00": [ + -0.366501, + 0.930418, + 0.0, + -3.34177, + -0.930418, + -0.366501, + 0.0, + 3.0643, + 0.0, + 0.0, + 1.0, + -0.07, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0268_01": [ + 0.819152, + 0.573576, + 0.0, + -2.31469, + -0.573576, + 0.819152, + 0.0, + -0.470647, + 0.0, + 0.0, + 1.0, + -0.0625, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0268_02": [ + -0.043619, + 0.999048, + 0.0, + -4.02631, + -0.999048, + -0.043619, + 0.0, + 1.71732, + 0.0, + 0.0, + 1.0, + -0.043279, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0269_00": [ + 0.913545, + 0.406737, + 0.0, + -5.01959, + -0.406737, + 0.913545, + 0.0, + -0.81695, + 0.0, + 0.0, + 1.0, + -0.414641, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0269_01": [ + 0.707107, + 0.707107, + 0.0, + -4.12942, + -0.707107, + 0.707107, + 0.0, + -0.073786, + 0.0, + 0.0, + 1.0, + -0.048466, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0269_02": [ + -0.061049, + 0.998135, + 0.0, + -2.40891, + -0.998135, + -0.061049, + 0.0, + 4.45686, + 0.0, + 0.0, + 1.0, + -0.047305, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0270_00": [ + -0.25038, + 0.968148, + 0.0, + -1.7133, + -0.968148, + -0.25038, + 0.0, + 4.39728, + 0.0, + 0.0, + 1.0, + -0.147489, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0270_01": [ + -0.130526, + 0.991445, + 0.0, + -2.54196, + -0.991445, + -0.130526, + 0.0, + 6.60939, + 0.0, + 0.0, + 1.0, + -0.116634, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0270_02": [ + -0.173648, + 0.984808, + 0.0, + -2.85463, + -0.984808, + -0.173648, + 0.0, + 4.70032, + 0.0, + 0.0, + 1.0, + -0.125838, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0271_00": [ + 0.5373, + 0.843391, + 0.0, + -4.32316, + -0.843391, + 0.5373, + 0.0, + 1.37071, + 0.0, + 0.0, + 1.0, + -0.286397, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0271_01": [ + -0.878817, + 0.477159, + 0.0, + 0.594726, + -0.477159, + -0.878817, + 0.0, + 3.77401, + 0.0, + 0.0, + 1.0, + -0.23669, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0272_00": [ + -0.061049, + 0.998135, + 0.0, + -1.79257, + -0.998135, + -0.061049, + 0.0, + 1.6571, + 0.0, + 0.0, + 1.0, + -0.037661, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0272_01": [ + -0.095846, + 0.995396, + 0.0, + -2.07937, + -0.995396, + -0.095846, + 0.0, + 3.63238, + 0.0, + 0.0, + 1.0, + -0.085233, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0273_00": [ + 0.008727, + 0.999962, + 0.0, + -4.08971, + -0.999962, + 0.008727, + 0.0, + 3.86814, + 0.0, + 0.0, + 1.0, + -0.128352, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0273_01": [ + 0.069757, + 0.997564, + 0.0, + -4.16985, + -0.997564, + 0.069757, + 0.0, + 3.38278, + 0.0, + 0.0, + 1.0, + -0.076511, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0274_00": [ + -0.843391, + 0.5373, + 0.0, + 0.554006, + -0.5373, + -0.843391, + 0.0, + 3.39266, + 0.0, + 0.0, + 1.0, + -0.068875, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0274_01": [ + 0.165048, + 0.986286, + 0.0, + -2.27703, + -0.986286, + 0.165048, + 0.0, + 2.30945, + 0.0, + 0.0, + 1.0, + -0.072205, + 0.0, + 0.0, + 
0.0, + 1.0 + ], + "scene0274_02": [ + -0.147809, + 0.989016, + 0.0, + -1.09043, + -0.989016, + -0.147809, + 0.0, + 4.40226, + 0.0, + 0.0, + 1.0, + -0.081496, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0275_00": [ + 0.898794, + 0.438371, + 0.0, + -4.63936, + -0.438371, + 0.898794, + 0.0, + -1.1859, + 0.0, + 0.0, + 1.0, + -0.054037, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0276_00": [ + 0.989016, + 0.147809, + 0.0, + -4.83179, + -0.147809, + 0.989016, + 0.0, + -3.84083, + 0.0, + 0.0, + 1.0, + -0.085604, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0276_01": [ + -0.984808, + 0.173648, + 0.0, + 3.19793, + -0.173648, + -0.984808, + 0.0, + 5.40466, + 0.0, + 0.0, + 1.0, + -0.061624, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0277_00": [ + -0.843391, + 0.5373, + 0.0, + 0.472956, + -0.5373, + -0.843391, + 0.0, + 3.37465, + 0.0, + 0.0, + 1.0, + -0.073031, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0277_01": [ + -0.317305, + 0.948324, + 0.0, + -1.31924, + -0.948324, + -0.317305, + 0.0, + 2.77863, + 0.0, + 0.0, + 1.0, + -0.039659, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0277_02": [ + 0.147809, + 0.989016, + 0.0, + -2.06312, + -0.989016, + 0.147809, + 0.0, + 1.8496, + 0.0, + 0.0, + 1.0, + -0.039471, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0278_00": [ + -0.008727, + 0.999962, + 0.0, + -1.19636, + -0.999962, + -0.008727, + 0.0, + 4.06051, + 0.0, + 0.0, + 1.0, + -0.097561, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0278_01": [ + 0.069757, + 0.997564, + 0.0, + -1.7755, + -0.997564, + 0.069757, + 0.0, + 3.78288, + 0.0, + 0.0, + 1.0, + -0.06643, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0279_00": [ + 0.438371, + 0.898794, + 0.0, + -4.40499, + -0.898794, + 0.438371, + 0.0, + 1.93149, + 0.0, + 0.0, + 1.0, + -0.235368, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0279_01": [ + 0.707107, + 0.707107, + 0.0, + -5.48754, + -0.707107, + 0.707107, + 0.0, + -0.391232, + 0.0, + 0.0, + 1.0, + -0.059288, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0279_02": [ + -0.725374, + 0.688354, + 0.0, + -0.156992, + -0.688354, + -0.725374, + 0.0, + 4.61114, + 0.0, + 0.0, + 1.0, + -0.097523, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0280_00": [ + 0.843391, + 0.5373, + 0.0, + -4.53498, + -0.5373, + 0.843391, + 0.0, + -1.31271, + 0.0, + 0.0, + 1.0, + -0.144144, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0280_01": [ + -0.390731, + 0.920505, + 0.0, + -1.42928, + -0.920505, + -0.390731, + 0.0, + 3.78952, + 0.0, + 0.0, + 1.0, + -0.166482, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0280_02": [ + 0.777146, + 0.62932, + 0.0, + -4.95063, + -0.62932, + 0.777146, + 0.0, + -0.776023, + 0.0, + 0.0, + 1.0, + -0.163804, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0281_00": [ + -0.989016, + 0.147809, + 0.0, + 3.40505, + -0.147809, + -0.989016, + 0.0, + 5.05669, + 0.0, + 0.0, + 1.0, + -0.063438, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0282_00": [ + -0.052336, + 0.99863, + 0.0, + -2.97202, + -0.99863, + -0.052336, + 0.0, + 1.65986, + 0.0, + 0.0, + 1.0, + -0.114141, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0282_01": [ + -0.113203, + 0.993572, + 0.0, + -2.40516, + -0.993572, + -0.113203, + 0.0, + 1.85535, + 0.0, + 0.0, + 1.0, + -0.091376, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0282_02": [ + -0.113203, + 0.993572, + 0.0, + -2.00381, + -0.993572, + -0.113203, + 0.0, + 1.77746, + 0.0, + 0.0, + 1.0, + -0.07566, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0283_00": [ + -0.833886, + 0.551937, + 0.0, + 0.865095, + -0.551937, + -0.833886, + 0.0, + 4.58832, + 0.0, + 0.0, + 1.0, + -0.055596, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0284_00": [ + 0.139173, + 0.990268, + 0.0, + -2.47282, + -0.990268, + 
0.139173, + 0.0, + 2.98517, + 0.0, + 0.0, + 1.0, + -0.063082, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0285_00": [ + -0.636078, + 0.771625, + 0.0, + -0.579249, + -0.771625, + -0.636078, + 0.0, + 5.29903, + 0.0, + 0.0, + 1.0, + -0.067016, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0286_00": [ + 0.507538, + 0.861629, + 0.0, + -4.13528, + -0.861629, + 0.507538, + 0.0, + 1.81745, + 0.0, + 0.0, + 1.0, + -0.039445, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0286_01": [ + 0.207912, + 0.978148, + 0.0, + -2.98835, + -0.978148, + 0.207912, + 0.0, + 3.43899, + 0.0, + 0.0, + 1.0, + -0.051328, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0286_02": [ + 0.398749, + 0.91706, + 0.0, + -2.8491, + -0.91706, + 0.398749, + 0.0, + 1.4566, + 0.0, + 0.0, + 1.0, + -0.056476, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0286_03": [ + 0.398749, + 0.91706, + 0.0, + -2.85813, + -0.91706, + 0.398749, + 0.0, + 1.32484, + 0.0, + 0.0, + 1.0, + -0.040611, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0287_00": [ + -0.069757, + 0.997564, + 0.0, + -1.38446, + -0.997564, + -0.069757, + 0.0, + 1.62758, + 0.0, + 0.0, + 1.0, + -0.033585, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0288_00": [ + 0.156434, + 0.987688, + 0.0, + -3.20457, + -0.987688, + 0.156434, + 0.0, + 1.61927, + 0.0, + 0.0, + 1.0, + -0.023387, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0288_01": [ + 0.130526, + 0.991445, + 0.0, + -2.76921, + -0.991445, + 0.130526, + 0.0, + 1.91114, + 0.0, + 0.0, + 1.0, + -0.031426, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0288_02": [ + 0.095846, + 0.995396, + 0.0, + -2.62067, + -0.995396, + 0.095846, + 0.0, + 1.7997, + 0.0, + 0.0, + 1.0, + -0.048822, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0289_00": [ + -0.878817, + 0.477159, + 0.0, + 1.05642, + -0.477159, + -0.878817, + 0.0, + 3.69078, + 0.0, + 0.0, + 1.0, + -0.220583, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0289_01": [ + -0.422618, + 0.906308, + 0.0, + -0.816674, + -0.906308, + -0.422618, + 0.0, + 4.53149, + 0.0, + 0.0, + 1.0, + -0.040743, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0290_00": [ + 0.930418, + 0.366501, + 0.0, + -6.6217, + -0.366501, + 0.930418, + 0.0, + -6.62825, + 0.0, + 0.0, + 1.0, + -0.15419, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0291_00": [ + -0.983255, + 0.182236, + 0.0, + 2.07304, + -0.182236, + -0.983255, + 0.0, + 4.20178, + 0.0, + 0.0, + 1.0, + -0.069467, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0291_01": [ + -0.894934, + 0.446198, + 0.0, + 1.19421, + -0.446198, + -0.894934, + 0.0, + 4.93588, + 0.0, + 0.0, + 1.0, + -0.061694, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0291_02": [ + -0.580703, + 0.814116, + 0.0, + -1.10555, + -0.814116, + -0.580703, + 0.0, + 5.79807, + 0.0, + 0.0, + 1.0, + -0.091578, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0292_00": [ + -0.358368, + 0.93358, + 0.0, + -1.26551, + -0.93358, + -0.358368, + 0.0, + 2.50267, + 0.0, + 0.0, + 1.0, + -0.024092, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0292_01": [ + 0.587785, + 0.809017, + 0.0, + -4.09098, + -0.809017, + 0.587785, + 0.0, + 1.5815, + 0.0, + 0.0, + 1.0, + -0.038086, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0293_00": [ + -0.492424, + 0.870356, + 0.0, + -0.191494, + -0.870356, + -0.492424, + 0.0, + 4.30417, + 0.0, + 0.0, + 1.0, + -0.072851, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0293_01": [ + -0.981627, + 0.190809, + 0.0, + 2.92062, + -0.190809, + -0.981627, + 0.0, + 4.69634, + 0.0, + 0.0, + 1.0, + -0.717786, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0294_00": [ + 0.833886, + 0.551937, + 0.0, + -4.52052, + -0.551937, + 0.833886, + 0.0, + -1.4828, + 0.0, + 0.0, + 1.0, + -0.044584, + 0.0, + 0.0, + 0.0, + 1.0 + ], + 
"scene0294_01": [ + -0.156434, + 0.987688, + 0.0, + -2.20846, + -0.987688, + -0.156434, + 0.0, + 3.3224, + 0.0, + 0.0, + 1.0, + -0.03852, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0294_02": [ + -0.309017, + 0.951056, + 0.0, + -1.86028, + -0.951056, + -0.309017, + 0.0, + 3.84025, + 0.0, + 0.0, + 1.0, + -0.169999, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0295_00": [ + 0.93358, + 0.358368, + 0.0, + -3.00245, + -0.358368, + 0.93358, + 0.0, + -1.02018, + 0.0, + 0.0, + 1.0, + -0.094566, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0295_01": [ + 0.857167, + 0.515038, + 0.0, + -3.01319, + -0.515038, + 0.857167, + 0.0, + -0.450244, + 0.0, + 0.0, + 1.0, + -0.050995, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0296_00": [ + 0.165048, + 0.986286, + 0.0, + -4.28489, + -0.986286, + 0.165048, + 0.0, + 3.05976, + 0.0, + 0.0, + 1.0, + -0.065552, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0296_01": [ + -0.870356, + 0.492424, + 0.0, + 1.73972, + -0.492424, + -0.870356, + 0.0, + 4.85435, + 0.0, + 0.0, + 1.0, + -0.031501, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0297_00": [ + -0.398749, + 0.91706, + 0.0, + -1.55846, + -0.91706, + -0.398749, + 0.0, + 4.43261, + 0.0, + 0.0, + 1.0, + -0.046537, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0297_01": [ + -0.121869, + 0.992546, + 0.0, + -2.66247, + -0.992546, + -0.121869, + 0.0, + 2.51171, + 0.0, + 0.0, + 1.0, + -0.027629, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0297_02": [ + -0.156434, + 0.987688, + 0.0, + -2.2523, + -0.987688, + -0.156434, + 0.0, + 2.93165, + 0.0, + 0.0, + 1.0, + -0.043178, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0298_00": [ + -0.333807, + 0.942641, + 0.0, + -0.542284, + -0.942641, + -0.333807, + 0.0, + 4.75996, + 0.0, + 0.0, + 1.0, + -0.041006, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0299_00": [ + 0.622515, + 0.782608, + 0.0, + -5.55481, + -0.782608, + 0.622515, + 0.0, + 0.78791, + 0.0, + 0.0, + 1.0, + -0.069811, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0299_01": [ + -0.898794, + 0.438371, + 0.0, + 0.742839, + -0.438371, + -0.898794, + 0.0, + 5.84114, + 0.0, + 0.0, + 1.0, + -0.055515, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0300_00": [ + -0.026177, + 0.999657, + 0.0, + -3.93294, + -0.999657, + -0.026177, + 0.0, + 2.03932, + 0.0, + 0.0, + 1.0, + -0.09942, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0300_01": [ + -0.165048, + 0.986286, + 0.0, + -3.70907, + -0.986286, + -0.165048, + 0.0, + 5.70023, + 0.0, + 0.0, + 1.0, + -0.07902, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0301_00": [ + 0.130526, + 0.991445, + 0.0, + -4.41265, + -0.991445, + 0.130526, + 0.0, + 1.55557, + 0.0, + 0.0, + 1.0, + -0.081471, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0301_01": [ + -0.182235, + 0.983255, + 0.0, + -2.46256, + -0.983255, + -0.182235, + 0.0, + 4.50636, + 0.0, + 0.0, + 1.0, + -0.076341, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0301_02": [ + 0.043619, + 0.999048, + 0.0, + -3.45343, + -0.999048, + 0.043619, + 0.0, + 3.80094, + 0.0, + 0.0, + 1.0, + -0.073736, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0302_00": [ + -0.477159, + 0.878817, + 0.0, + -1.21508, + -0.878817, + -0.477159, + 0.0, + 4.97064, + 0.0, + 0.0, + 1.0, + -0.136549, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0302_01": [ + -0.861629, + 0.507538, + 0.0, + 1.57434, + -0.507538, + -0.861629, + 0.0, + 4.47061, + 0.0, + 0.0, + 1.0, + -0.098224, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0303_00": [ + 0.992546, + 0.121869, + 0.0, + -1.45401, + -0.121869, + 0.992546, + 0.0, + -1.66817, + 0.0, + 0.0, + 1.0, + -0.054445, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0303_01": [ + 0.325568, + 0.945519, + 0.0, + -1.97727, + -0.945519, + 0.325568, + 0.0, + 
1.33715, + 0.0, + 0.0, + 1.0, + -0.062389, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0303_02": [ + -0.953717, + 0.300706, + 0.0, + 0.691332, + -0.300706, + -0.953717, + 0.0, + 2.33185, + 0.0, + 0.0, + 1.0, + -0.076116, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0304_00": [ + -0.843391, + 0.5373, + 0.0, + 1.0781, + -0.5373, + -0.843391, + 0.0, + 6.19778, + 0.0, + 0.0, + 1.0, + -0.095249, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0305_00": [ + 0.258819, + 0.965926, + 0.0, + -3.51904, + -0.965926, + 0.258819, + 0.0, + 3.27903, + 0.0, + 0.0, + 1.0, + -0.078539, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0305_01": [ + 0.113203, + 0.993572, + 0.0, + -2.84936, + -0.993572, + 0.113203, + 0.0, + 4.81586, + 0.0, + 0.0, + 1.0, + -0.081365, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0306_00": [ + -0.824126, + 0.566406, + 0.0, + 0.436602, + -0.566406, + -0.824126, + 0.0, + 3.97624, + 0.0, + 0.0, + 1.0, + -0.158652, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0306_01": [ + -0.382683, + 0.92388, + 0.0, + -0.659499, + -0.92388, + -0.382683, + 0.0, + 3.26352, + 0.0, + 0.0, + 1.0, + -0.143696, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0307_00": [ + 0.430511, + 0.902585, + 0.0, + -5.50991, + -0.902585, + 0.430511, + 0.0, + 1.43868, + 0.0, + 0.0, + 1.0, + -0.08074, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0307_01": [ + -0.275637, + 0.961262, + 0.0, + -2.9451, + -0.961262, + -0.275637, + 0.0, + 3.9095, + 0.0, + 0.0, + 1.0, + -0.069496, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0307_02": [ + -0.636078, + 0.771625, + 0.0, + -0.668625, + -0.771625, + -0.636078, + 0.0, + 5.76196, + 0.0, + 0.0, + 1.0, + -0.07892, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0308_00": [ + 0.113203, + 0.993572, + 0.0, + -3.17452, + -0.993572, + 0.113203, + 0.0, + 1.386, + 0.0, + 0.0, + 1.0, + -0.089996, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0309_00": [ + -0.139173, + 0.990268, + 0.0, + -4.60862, + -0.990268, + -0.139173, + 0.0, + 4.37247, + 0.0, + 0.0, + 1.0, + -0.049636, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0309_01": [ + -0.95882, + 0.284015, + 0.0, + 3.92323, + -0.284015, + -0.95882, + 0.0, + 4.99093, + 0.0, + 0.0, + 1.0, + -0.204371, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0310_00": [ + -0.130526, + 0.991445, + 0.0, + -0.934172, + -0.991445, + -0.130526, + 0.0, + 2.19715, + 0.0, + 0.0, + 1.0, + -0.043905, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0310_01": [ + -0.052336, + 0.99863, + 0.0, + -1.00884, + -0.99863, + -0.052336, + 0.0, + 1.86081, + 0.0, + 0.0, + 1.0, + -0.08779, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0310_02": [ + -0.026177, + 0.999657, + 0.0, + -1.03349, + -0.999657, + -0.026177, + 0.0, + 2.00326, + 0.0, + 0.0, + 1.0, + -0.107064, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0311_00": [ + 0.207912, + 0.978148, + 0.0, + -3.19363, + -0.978148, + 0.207912, + 0.0, + 2.64471, + 0.0, + 0.0, + 1.0, + -0.038942, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0312_00": [ + -0.325568, + 0.945519, + 0.0, + -2.83856, + -0.945519, + -0.325568, + 0.0, + 6.24705, + 0.0, + 0.0, + 1.0, + -0.06253, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0312_01": [ + -0.275637, + 0.961262, + 0.0, + -2.27563, + -0.961262, + -0.275637, + 0.0, + 7.88306, + 0.0, + 0.0, + 1.0, + -0.151094, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0312_02": [ + -0.559193, + 0.829038, + 0.0, + -4.06446, + -0.829038, + -0.559193, + 0.0, + 7.08548, + 0.0, + 0.0, + 1.0, + -0.125933, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0313_00": [ + 0.887011, + 0.461749, + 0.0, + -5.68766, + -0.461749, + 0.887011, + 0.0, + -1.64838, + 0.0, + 0.0, + 1.0, + -0.078991, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0313_01": [ + 
-0.71934, + 0.694658, + 0.0, + -0.61128, + -0.694658, + -0.71934, + 0.0, + 5.30216, + 0.0, + 0.0, + 1.0, + -0.042238, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0313_02": [ + -0.71934, + 0.694658, + 0.0, + -1.67054, + -0.694658, + -0.71934, + 0.0, + 4.33349, + 0.0, + 0.0, + 1.0, + -0.056281, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0314_00": [ + 0.207912, + 0.978148, + 0.0, + -2.69759, + -0.978148, + 0.207912, + 0.0, + 1.65224, + 0.0, + 0.0, + 1.0, + -0.050099, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0315_00": [ + 0.945519, + 0.325568, + 0.0, + -4.53879, + -0.325568, + 0.945519, + 0.0, + -3.21923, + 0.0, + 0.0, + 1.0, + -0.123849, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0316_00": [ + -0.190809, + 0.981627, + 0.0, + -1.6473, + -0.981627, + -0.190809, + 0.0, + 2.61593, + 0.0, + 0.0, + 1.0, + -0.029897, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0317_00": [ + -0.284015, + 0.95882, + 0.0, + -2.85071, + -0.95882, + -0.284015, + 0.0, + 6.58963, + 0.0, + 0.0, + 1.0, + -0.052686, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0317_01": [ + 0.095846, + 0.995396, + 0.0, + -5.68345, + -0.995396, + 0.095846, + 0.0, + 4.88716, + 0.0, + 0.0, + 1.0, + -0.130744, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0318_00": [ + 0.052336, + 0.99863, + 0.0, + -3.41635, + -0.99863, + 0.052336, + 0.0, + 3.66674, + 0.0, + 0.0, + 1.0, + -0.046905, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0319_00": [ + 0.681998, + 0.731354, + 0.0, + -6.10768, + -0.731354, + 0.681998, + 0.0, + -1.11454, + 0.0, + 0.0, + 1.0, + -0.060903, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0320_00": [ + 0.026177, + 0.999657, + 0.0, + -1.85463, + -0.999657, + 0.026177, + 0.0, + 10.2331, + 0.0, + 0.0, + 1.0, + -0.09181, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0320_01": [ + 0.199368, + 0.979925, + 0.0, + -3.37786, + -0.979925, + 0.199368, + 0.0, + 5.21071, + 0.0, + 0.0, + 1.0, + -0.060091, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0320_02": [ + -0.998135, + 0.061049, + 0.0, + 2.51018, + -0.061049, + -0.998135, + 0.0, + 6.38687, + 0.0, + 0.0, + 1.0, + -0.088188, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0320_03": [ + -0.71325, + 0.700909, + 0.0, + -0.173957, + -0.700909, + -0.71325, + 0.0, + 6.81077, + 0.0, + 0.0, + 1.0, + -0.027065, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0321_00": [ + -0.366501, + 0.930418, + 0.0, + -0.78027, + -0.930418, + -0.366501, + 0.0, + 3.60883, + 0.0, + 0.0, + 1.0, + -0.045631, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0322_00": [ + -0.996917, + 0.078459, + 0.0, + 2.32407, + -0.078459, + -0.996917, + 0.0, + 3.2168, + 0.0, + 0.0, + 1.0, + -0.053284, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0323_00": [ + 0.34202, + 0.939693, + 0.0, + -3.56796, + -0.939693, + 0.34202, + 0.0, + 2.35622, + 0.0, + 0.0, + 1.0, + -0.121373, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0323_01": [ + -0.406737, + 0.913545, + 0.0, + -0.606656, + -0.913545, + -0.406737, + 0.0, + 3.14581, + 0.0, + 0.0, + 1.0, + -0.154768, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0324_00": [ + -0.173648, + 0.984808, + 0.0, + -3.8315, + -0.984808, + -0.173648, + 0.0, + 4.09283, + 0.0, + 0.0, + 1.0, + -0.035603, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0324_01": [ + 0.034899, + 0.999391, + 0.0, + -2.57517, + -0.999391, + 0.034899, + 0.0, + 4.04995, + 0.0, + 0.0, + 1.0, + -0.056227, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0325_00": [ + -0.995396, + 0.095846, + 0.0, + 1.43846, + -0.095846, + -0.995396, + 0.0, + 4.053, + 0.0, + 0.0, + 1.0, + -0.049516, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0325_01": [ + 0.069757, + 0.997564, + 0.0, + -3.22459, + -0.997564, + 0.069757, + 0.0, + 2.65642, + 0.0, + 0.0, + 1.0, + 
-0.094971, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0326_00": [ + -0.008727, + 0.999962, + 0.0, + -4.15633, + -0.999962, + -0.008727, + 0.0, + 2.09606, + 0.0, + 0.0, + 1.0, + -0.028834, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0327_00": [ + -0.573576, + 0.819152, + 0.0, + -0.309522, + -0.819152, + -0.573576, + 0.0, + 4.26796, + 0.0, + 0.0, + 1.0, + -0.081413, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0328_00": [ + -0.737277, + 0.67559, + 0.0, + -0.227084, + -0.67559, + -0.737277, + 0.0, + 3.21433, + 0.0, + 0.0, + 1.0, + -0.047283, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0329_00": [ + -0.694658, + 0.71934, + 0.0, + -0.584211, + -0.71934, + -0.694658, + 0.0, + 7.3374, + 0.0, + 0.0, + 1.0, + -0.087177, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0329_01": [ + -0.777146, + 0.629321, + 0.0, + -0.513216, + -0.629321, + -0.777146, + 0.0, + 6.66912, + 0.0, + 0.0, + 1.0, + -0.124083, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0329_02": [ + -0.71325, + 0.700909, + 0.0, + -1.44005, + -0.700909, + -0.71325, + 0.0, + 7.86936, + 0.0, + 0.0, + 1.0, + -0.142992, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0330_00": [ + 0.96363, + 0.267238, + 0.0, + -4.87307, + -0.267238, + 0.96363, + 0.0, + -3.22511, + 0.0, + 0.0, + 1.0, + -0.06115, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0331_00": [ + 0.995396, + 0.095846, + 0.0, + -2.06182, + -0.095846, + 0.995396, + 0.0, + -2.28394, + 0.0, + 0.0, + 1.0, + -0.121736, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0331_01": [ + 0.983255, + 0.182236, + 0.0, + -3.41582, + -0.182236, + 0.983255, + 0.0, + -1.60593, + 0.0, + 0.0, + 1.0, + -0.104, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0332_00": [ + -0.156434, + 0.987688, + 0.0, + -1.7437, + -0.987688, + -0.156434, + 0.0, + 2.24175, + 0.0, + 0.0, + 1.0, + -0.054826, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0332_01": [ + 0.207912, + 0.978148, + 0.0, + -4.80643, + -0.978148, + 0.207912, + 0.0, + 1.8458, + 0.0, + 0.0, + 1.0, + -0.054318, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0332_02": [ + 0.233445, + 0.97237, + 0.0, + -2.64381, + -0.97237, + 0.233445, + 0.0, + 1.561, + 0.0, + 0.0, + 1.0, + -0.030635, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0333_00": [ + -0.300706, + 0.953717, + 0.0, + -0.905242, + -0.953717, + -0.300706, + 0.0, + 1.7344, + 0.0, + 0.0, + 1.0, + -0.077967, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0334_00": [ + 0.920505, + 0.390731, + 0.0, + -8.22273, + -0.390731, + 0.920505, + 0.0, + -0.644111, + 0.0, + 0.0, + 1.0, + -0.428044, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0334_01": [ + 0.997564, + 0.069757, + 0.0, + -6.57887, + -0.069757, + 0.997564, + 0.0, + -4.4989, + 0.0, + 0.0, + 1.0, + -0.073195, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0334_02": [ + -0.990268, + 0.139173, + 0.0, + 4.04431, + -0.139173, + -0.990268, + 0.0, + 6.21667, + 0.0, + 0.0, + 1.0, + -0.433012, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0335_00": [ + 0.350207, + 0.936672, + 0.0, + -5.6229, + -0.936672, + 0.350207, + 0.0, + 3.30812, + 0.0, + 0.0, + 1.0, + -0.104635, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0335_01": [ + 0.984808, + 0.173648, + 0.0, + -2.47289, + -0.173648, + 0.984808, + 0.0, + -2.708, + 0.0, + 0.0, + 1.0, + -0.101553, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0335_02": [ + 0.968148, + 0.25038, + 0.0, + -2.86275, + -0.25038, + 0.968148, + 0.0, + -2.55853, + 0.0, + 0.0, + 1.0, + -0.127252, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0336_00": [ + -0.898794, + 0.438371, + 0.0, + 1.12972, + -0.438371, + -0.898794, + 0.0, + 3.26906, + 0.0, + 0.0, + 1.0, + -0.145086, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0336_01": [ + 0.173648, + 0.984808, + 0.0, + -2.8235, + 
-0.984808, + 0.173648, + 0.0, + 1.80459, + 0.0, + 0.0, + 1.0, + -0.197281, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0337_00": [ + -0.608761, + 0.793353, + 0.0, + -0.157034, + -0.793353, + -0.608761, + 0.0, + 4.59063, + 0.0, + 0.0, + 1.0, + -0.050666, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0337_01": [ + 0.999962, + 0.008727, + 0.0, + -1.21297, + -0.008727, + 0.999962, + 0.0, + -1.0998, + 0.0, + 0.0, + 1.0, + -0.116698, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0337_02": [ + 0.993572, + 0.113203, + 0.0, + -2.24354, + -0.113203, + 0.993572, + 0.0, + -2.65555, + 0.0, + 0.0, + 1.0, + -0.370997, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0338_00": [ + -0.573576, + 0.819152, + 0.0, + -0.256954, + -0.819152, + -0.573576, + 0.0, + 3.1325, + 0.0, + 0.0, + 1.0, + -0.072946, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0338_01": [ + -0.996195, + 0.087156, + 0.0, + 1.6661, + -0.087156, + -0.996195, + 0.0, + 2.14478, + 0.0, + 0.0, + 1.0, + -0.065416, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0338_02": [ + -0.989016, + 0.147809, + 0.0, + 1.78657, + -0.147809, + -0.989016, + 0.0, + 2.21264, + 0.0, + 0.0, + 1.0, + -0.073086, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0339_00": [ + -0.992546, + 0.121869, + 0.0, + 1.33, + -0.121869, + -0.992546, + 0.0, + 4.70969, + 0.0, + 0.0, + 1.0, + -0.049539, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0340_00": [ + 0.292372, + 0.956305, + 0.0, + -3.82813, + -0.956305, + 0.292372, + 0.0, + 3.72798, + 0.0, + 0.0, + 1.0, + -0.122006, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0340_01": [ + 0.522498, + 0.85264, + 0.0, + -4.99432, + -0.85264, + 0.522498, + 0.0, + 2.37705, + 0.0, + 0.0, + 1.0, + -0.13971, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0340_02": [ + -0.008727, + 0.999962, + 0.0, + -1.76954, + -0.999962, + -0.008727, + 0.0, + 3.1519, + 0.0, + 0.0, + 1.0, + -0.142687, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0341_00": [ + 0.139173, + 0.990268, + 0.0, + -2.37858, + -0.990268, + 0.139173, + 0.0, + 2.76974, + 0.0, + 0.0, + 1.0, + -0.102411, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0341_01": [ + 0.241922, + 0.970296, + 0.0, + -2.95845, + -0.970296, + 0.241922, + 0.0, + 2.31825, + 0.0, + 0.0, + 1.0, + -0.103239, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0342_00": [ + -0.566406, + 0.824126, + 0.0, + -0.069699, + -0.824126, + -0.566406, + 0.0, + 5.52391, + 0.0, + 0.0, + 1.0, + -0.162621, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0343_00": [ + 0.147809, + 0.989016, + 0.0, + -3.29901, + -0.989016, + 0.147809, + 0.0, + 1.60899, + 0.0, + 0.0, + 1.0, + -0.186277, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0344_00": [ + -0.939693, + 0.34202, + 0.0, + 1.1786, + -0.34202, + -0.939693, + 0.0, + 3.66783, + 0.0, + 0.0, + 1.0, + -0.069963, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0344_01": [ + 0.642788, + 0.766044, + 0.0, + -5.19985, + -0.766044, + 0.642788, + 0.0, + 0.269066, + 0.0, + 0.0, + 1.0, + -0.048368, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0345_00": [ + 0.258819, + 0.965926, + 0.0, + -4.19926, + -0.965926, + 0.258819, + 0.0, + 2.22874, + 0.0, + 0.0, + 1.0, + -0.085717, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0345_01": [ + 0.942641, + 0.333807, + 0.0, + -4.48889, + -0.333807, + 0.942641, + 0.0, + -2.13483, + 0.0, + 0.0, + 1.0, + -0.087114, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0346_00": [ + 0.993572, + 0.113203, + 0.0, + -1.96545, + -0.113203, + 0.993572, + 0.0, + -2.38444, + 0.0, + 0.0, + 1.0, + -0.039415, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0346_01": [ + 0.968148, + 0.25038, + 0.0, + -2.60707, + -0.25038, + 0.968148, + 0.0, + -2.56176, + 0.0, + 0.0, + 1.0, + -0.034085, + 0.0, + 0.0, + 0.0, + 1.0 + ], + 
"scene0347_00": [ + -0.997564, + 0.069757, + 0.0, + 1.25124, + -0.069757, + -0.997564, + 0.0, + 1.53146, + 0.0, + 0.0, + 1.0, + -0.0865, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0347_01": [ + -0.898794, + 0.438371, + 0.0, + 0.793331, + -0.438371, + -0.898794, + 0.0, + 2.35454, + 0.0, + 0.0, + 1.0, + -0.050942, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0347_02": [ + -0.669131, + 0.743145, + 0.0, + -0.063181, + -0.743145, + -0.669131, + 0.0, + 2.68252, + 0.0, + 0.0, + 1.0, + -0.058, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0348_00": [ + -0.121869, + 0.992546, + 0.0, + -1.18077, + -0.992546, + -0.121869, + 0.0, + 1.56307, + 0.0, + 0.0, + 1.0, + -0.178669, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0348_01": [ + -0.992546, + 0.121869, + 0.0, + 0.67854, + -0.121869, + -0.992546, + 0.0, + 3.84938, + 0.0, + 0.0, + 1.0, + -0.213339, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0348_02": [ + -0.998135, + 0.061049, + 0.0, + 0.972506, + -0.061049, + -0.998135, + 0.0, + 2.62529, + 0.0, + 0.0, + 1.0, + -0.051912, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0349_00": [ + -0.069757, + 0.997564, + 0.0, + -0.725221, + -0.997564, + -0.069757, + 0.0, + 1.31687, + 0.0, + 0.0, + 1.0, + -0.072262, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0349_01": [ + -0.147809, + 0.989016, + 0.0, + -0.795323, + -0.989016, + -0.147809, + 0.0, + 1.09062, + 0.0, + 0.0, + 1.0, + -0.088703, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0350_00": [ + 0.095846, + 0.995396, + 0.0, + -4.73382, + -0.995396, + 0.095846, + 0.0, + 2.36824, + 0.0, + 0.0, + 1.0, + -0.066551, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0350_01": [ + 0.043619, + 0.999048, + 0.0, + -4.71863, + -0.999048, + 0.043619, + 0.0, + 2.54833, + 0.0, + 0.0, + 1.0, + -0.058951, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0350_02": [ + 0.043619, + 0.999048, + 0.0, + -4.60142, + -0.999048, + 0.043619, + 0.0, + 2.09596, + 0.0, + 0.0, + 1.0, + -0.063238, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0351_00": [ + 0.608761, + 0.793353, + 0.0, + -3.66097, + -0.793353, + 0.608761, + 0.0, + 0.212328, + 0.0, + 0.0, + 1.0, + -0.325357, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0351_01": [ + -0.224951, + 0.97437, + 0.0, + -2.26891, + -0.97437, + -0.224951, + 0.0, + 3.75691, + 0.0, + 0.0, + 1.0, + -0.503845, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0352_00": [ + 0.566406, + 0.824126, + 0.0, + -3.62244, + -0.824126, + 0.566406, + 0.0, + 0.827391, + 0.0, + 0.0, + 1.0, + -0.081034, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0352_01": [ + 0.139173, + 0.990268, + 0.0, + -2.12416, + -0.990268, + 0.139173, + 0.0, + 2.1538, + 0.0, + 0.0, + 1.0, + -0.168205, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0352_02": [ + -0.824126, + 0.566406, + 0.0, + -0.213696, + -0.566406, + -0.824126, + 0.0, + 4.86488, + 0.0, + 0.0, + 1.0, + -0.080979, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0353_00": [ + 0.453991, + 0.891007, + 0.0, + -3.6441, + -0.891007, + 0.453991, + 0.0, + 1.42513, + 0.0, + 0.0, + 1.0, + -0.21182, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0353_01": [ + 0.743145, + 0.669131, + 0.0, + -4.41489, + -0.669131, + 0.743145, + 0.0, + -0.692606, + 0.0, + 0.0, + 1.0, + -0.172129, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0353_02": [ + 0.350207, + 0.936672, + 0.0, + -3.25818, + -0.936672, + 0.350207, + 0.0, + 1.75437, + 0.0, + 0.0, + 1.0, + -0.198432, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0354_00": [ + 0.052336, + 0.99863, + 0.0, + -1.78059, + -0.99863, + 0.052336, + 0.0, + 2.19812, + 0.0, + 0.0, + 1.0, + -0.049187, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0355_00": [ + 0.147809, + 0.989016, + 0.0, + -6.10951, + -0.989016, + 0.147809, + 0.0, + 
+    3.49383,
+    0.0,
+    0.0,
+    1.0,
+    -0.172588,
+    0.0,
+    0.0,
+    0.0,
+    1.0
+  ],
+  "scene0355_01": [
+    0.026177,
+    0.999657,
+    0.0,
+    -2.42339,
+    -0.999657,
+    0.026177,
+    0.0,
+    3.48024,
+    0.0,
+    0.0,
+    1.0,
+    -0.113771,
+    0.0,
+    0.0,
+    0.0,
+    1.0
+  ],
[... entries for "scene0356_00" through "scene0552_00" follow in the same form and are condensed here: each ScanNet scene ID maps to a row-major 4x4 axis-alignment matrix of 16 floats, i.e. a rotation about the z-axis plus a translation, with third row (0, 0, 1, t_z) and fourth row (0, 0, 0, 1) ...]
+  "scene0552_01": [
+    0.559193,
+    0.829038,
+    0.0,
+    -4.79864,
+    -0.829038,
+    0.559193,
+    0.0,
+    1.32355,
+    0.0,
+    0.0,
+    1.0,
+    -0.036911,
+    0.0,
+    0.0,
+ 0.0, + 1.0 + ], + "scene0553_00": [ + -0.309017, + 0.951056, + 0.0, + -2.44484, + -0.951056, + -0.309017, + 0.0, + 2.43443, + 0.0, + 0.0, + 1.0, + -0.054122, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0553_01": [ + -0.017452, + 0.999848, + 0.0, + -2.96866, + -0.999848, + -0.017452, + 0.0, + 1.53589, + 0.0, + 0.0, + 1.0, + -0.053701, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0553_02": [ + 0.292372, + 0.956305, + 0.0, + -2.03795, + -0.956305, + 0.292372, + 0.0, + 1.197, + 0.0, + 0.0, + 1.0, + -0.073702, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0554_00": [ + -0.034899, + 0.999391, + 0.0, + -1.31112, + -0.999391, + -0.034899, + 0.0, + 2.15521, + 0.0, + 0.0, + 1.0, + -0.062277, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0554_01": [ + -0.113203, + 0.993572, + 0.0, + -1.29058, + -0.993572, + -0.113203, + 0.0, + 2.25406, + 0.0, + 0.0, + 1.0, + -0.039688, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0555_00": [ + 0.224951, + 0.97437, + 0.0, + -3.84466, + -0.97437, + 0.224951, + 0.0, + 3.07788, + 0.0, + 0.0, + 1.0, + -0.862582, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0556_00": [ + -0.043619, + 0.999048, + 0.0, + -2.61504, + -0.999048, + -0.043619, + 0.0, + 3.88481, + 0.0, + 0.0, + 1.0, + -0.058283, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0556_01": [ + 0.951057, + 0.309017, + 0.0, + -4.54606, + -0.309017, + 0.951057, + 0.0, + -1.39245, + 0.0, + 0.0, + 1.0, + -0.088547, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0557_00": [ + 0.350207, + 0.936672, + 0.0, + -5.01003, + -0.936672, + 0.350207, + 0.0, + 1.75755, + 0.0, + 0.0, + 1.0, + -0.082942, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0557_01": [ + 0.069757, + 0.997564, + 0.0, + -3.85524, + -0.997564, + 0.069757, + 0.0, + 2.3213, + 0.0, + 0.0, + 1.0, + -0.053301, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0557_02": [ + 0.087156, + 0.996195, + 0.0, + -4.3608, + -0.996195, + 0.087156, + 0.0, + 2.47322, + 0.0, + 0.0, + 1.0, + -0.060839, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0558_00": [ + 0.333807, + 0.942641, + 0.0, + -5.27712, + -0.942641, + 0.333807, + 0.0, + 2.48632, + 0.0, + 0.0, + 1.0, + -0.068043, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0558_01": [ + 0.095846, + 0.995396, + 0.0, + -4.61916, + -0.995396, + 0.095846, + 0.0, + 4.47979, + 0.0, + 0.0, + 1.0, + -0.072031, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0558_02": [ + 0.984808, + 0.173648, + 0.0, + -4.54915, + -0.173648, + 0.984808, + 0.0, + -2.78786, + 0.0, + 0.0, + 1.0, + -0.130438, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0559_00": [ + -0.989016, + 0.147809, + 0.0, + 2.9163, + -0.147809, + -0.989016, + 0.0, + 5.3609, + 0.0, + 0.0, + 1.0, + -0.055172, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0559_01": [ + 0.095846, + 0.995396, + 0.0, + -5.98904, + -0.995396, + 0.095846, + 0.0, + 2.5822, + 0.0, + 0.0, + 1.0, + -0.036386, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0559_02": [ + 0.814116, + 0.580703, + 0.0, + -4.24149, + -0.580703, + 0.814116, + 0.0, + -0.717865, + 0.0, + 0.0, + 1.0, + -0.058589, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0560_00": [ + 0.990268, + 0.139173, + 0.0, + -2.17586, + -0.139173, + 0.990268, + 0.0, + -1.95961, + 0.0, + 0.0, + 1.0, + -0.138827, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0561_00": [ + -0.113203, + 0.993572, + 0.0, + -1.96112, + -0.993572, + -0.113203, + 0.0, + 4.88483, + 0.0, + 0.0, + 1.0, + -1.10799, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0561_01": [ + 0.92388, + 0.382683, + 0.0, + -3.8536, + -0.382683, + 0.92388, + 0.0, + -3.22662, + 0.0, + 0.0, + 1.0, + -2.00513, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0562_00": [ + -0.034899, + 0.999391, + 0.0, + -4.18132, + -0.999391, + -0.034899, + 
0.0, + 4.1309, + 0.0, + 0.0, + 1.0, + -0.037156, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0563_00": [ + -0.061049, + 0.998135, + 0.0, + -1.94055, + -0.998135, + -0.061049, + 0.0, + 3.22437, + 0.0, + 0.0, + 1.0, + -0.040739, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0564_00": [ + -0.824126, + 0.566406, + 0.0, + 0.640465, + -0.566406, + -0.824126, + 0.0, + 2.2163, + 0.0, + 0.0, + 1.0, + -0.040084, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0565_00": [ + -0.121869, + 0.992546, + 0.0, + -2.23807, + -0.992546, + -0.121869, + 0.0, + 4.44122, + 0.0, + 0.0, + 1.0, + -0.046461, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0566_00": [ + -0.798635, + 0.601815, + 0.0, + 1.06528, + -0.601815, + -0.798635, + 0.0, + 6.39223, + 0.0, + 0.0, + 1.0, + -0.025602, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0567_00": [ + 0.446198, + 0.894934, + 0.0, + -3.93711, + -0.894934, + 0.446198, + 0.0, + 1.14313, + 0.0, + 0.0, + 1.0, + -0.111004, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0567_01": [ + -0.008727, + 0.999962, + 0.0, + -2.02086, + -0.999962, + -0.008727, + 0.0, + 2.7109, + 0.0, + 0.0, + 1.0, + -0.116226, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0568_00": [ + -0.999657, + 0.026177, + 0.0, + 2.82204, + -0.026177, + -0.999657, + 0.0, + 3.74026, + 0.0, + 0.0, + 1.0, + -0.80733, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0568_01": [ + 0.961262, + 0.275637, + 0.0, + -4.11274, + -0.275637, + 0.961262, + 0.0, + -2.83746, + 0.0, + 0.0, + 1.0, + -0.848935, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0568_02": [ + 0.965926, + 0.258819, + 0.0, + -3.92955, + -0.258819, + 0.965926, + 0.0, + -2.8717, + 0.0, + 0.0, + 1.0, + -0.648554, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0569_00": [ + 0.061049, + 0.998135, + 0.0, + -3.63424, + -0.998135, + 0.061049, + 0.0, + 2.86353, + 0.0, + 0.0, + 1.0, + -0.043809, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0569_01": [ + -0.0, + 1.0, + 0.0, + -3.62591, + -1.0, + -0.0, + 0.0, + 3.36757, + 0.0, + 0.0, + 1.0, + -0.093445, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0570_00": [ + -0.333807, + 0.942641, + 0.0, + -0.03425, + -0.942641, + -0.333807, + 0.0, + 3.53283, + 0.0, + 0.0, + 1.0, + -0.021274, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0570_01": [ + 0.139173, + 0.990268, + 0.0, + -1.58102, + -0.990268, + 0.139173, + 0.0, + 1.75468, + 0.0, + 0.0, + 1.0, + -0.023389, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0570_02": [ + 0.999657, + 0.026177, + 0.0, + -2.15438, + -0.026177, + 0.999657, + 0.0, + -1.98884, + 0.0, + 0.0, + 1.0, + -0.032252, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0571_00": [ + -0.979925, + 0.199368, + 0.0, + 1.21954, + -0.199368, + -0.979925, + 0.0, + 4.87641, + 0.0, + 0.0, + 1.0, + -0.08489, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0571_01": [ + -0.993572, + 0.113203, + 0.0, + 1.54286, + -0.113203, + -0.993572, + 0.0, + 4.44323, + 0.0, + 0.0, + 1.0, + -0.097638, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0572_00": [ + -0.333807, + 0.942641, + 0.0, + -1.78507, + -0.942641, + -0.333807, + 0.0, + 5.6554, + 0.0, + 0.0, + 1.0, + -0.073832, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0572_01": [ + -0.300706, + 0.953717, + 0.0, + -1.67406, + -0.953717, + -0.300706, + 0.0, + 5.51796, + 0.0, + 0.0, + 1.0, + -0.080015, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0572_02": [ + -0.87462, + 0.48481, + 0.0, + 1.55386, + -0.48481, + -0.87462, + 0.0, + 6.11858, + 0.0, + 0.0, + 1.0, + -0.093174, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0573_00": [ + -0.936672, + 0.350207, + 0.0, + 2.9336, + -0.350207, + -0.936672, + 0.0, + 5.73701, + 0.0, + 0.0, + 1.0, + -0.022924, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0573_01": [ + -0.165048, + 
0.986286, + 0.0, + -0.98146, + -0.986286, + -0.165048, + 0.0, + 4.78358, + 0.0, + 0.0, + 1.0, + -0.02851, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0574_00": [ + -0.999962, + 0.008727, + 0.0, + 3.35009, + -0.008727, + -0.999962, + 0.0, + 3.25765, + 0.0, + 0.0, + 1.0, + -0.044752, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0574_01": [ + -0.026177, + 0.999657, + 0.0, + -3.17634, + -0.999657, + -0.026177, + 0.0, + 1.45396, + 0.0, + 0.0, + 1.0, + -0.035739, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0574_02": [ + -0.999962, + 0.008727, + 0.0, + 2.87181, + -0.008727, + -0.999962, + 0.0, + 3.32798, + 0.0, + 0.0, + 1.0, + -0.105381, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0575_00": [ + -0.956305, + 0.292372, + 0.0, + 1.30756, + -0.292372, + -0.956305, + 0.0, + 4.31008, + 0.0, + 0.0, + 1.0, + -0.058603, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0575_01": [ + 0.199368, + 0.979925, + 0.0, + -2.88188, + -0.979925, + 0.199368, + 0.0, + 2.62657, + 0.0, + 0.0, + 1.0, + -0.046865, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0575_02": [ + 0.636078, + 0.771625, + 0.0, + -4.64735, + -0.771625, + 0.636078, + 0.0, + 0.473332, + 0.0, + 0.0, + 1.0, + -0.055074, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0576_00": [ + -0.936672, + 0.350207, + 0.0, + 1.25827, + -0.350207, + -0.936672, + 0.0, + 3.7022, + 0.0, + 0.0, + 1.0, + -0.188644, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0576_01": [ + -0.25038, + 0.968148, + 0.0, + -1.57399, + -0.968148, + -0.25038, + 0.0, + 3.39464, + 0.0, + 0.0, + 1.0, + -0.1248, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0576_02": [ + -0.777146, + 0.629321, + 0.0, + 0.656631, + -0.629321, + -0.777146, + 0.0, + 3.81111, + 0.0, + 0.0, + 1.0, + -0.099047, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0577_00": [ + -0.999962, + 0.008727, + 0.0, + 3.17448, + -0.008727, + -0.999962, + 0.0, + 2.41959, + 0.0, + 0.0, + 1.0, + -0.066842, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0578_00": [ + -0.0, + 1.0, + 0.0, + -3.5868, + -1.0, + -0.0, + 0.0, + 3.52514, + 0.0, + 0.0, + 1.0, + -0.054826, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0578_01": [ + -0.130526, + 0.991445, + 0.0, + -3.1966, + -0.991445, + -0.130526, + 0.0, + 3.16792, + 0.0, + 0.0, + 1.0, + -0.035838, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0578_02": [ + 0.998135, + 0.061049, + 0.0, + -2.00355, + -0.061049, + 0.998135, + 0.0, + -3.93635, + 0.0, + 0.0, + 1.0, + -0.045808, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0579_00": [ + -0.809017, + 0.587785, + 0.0, + 1.42709, + -0.587785, + -0.809017, + 0.0, + 3.92469, + 0.0, + 0.0, + 1.0, + -0.044911, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0579_01": [ + -0.760406, + 0.649448, + 0.0, + 1.60594, + -0.649448, + -0.760406, + 0.0, + 4.47507, + 0.0, + 0.0, + 1.0, + -0.028061, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0579_02": [ + -0.996917, + 0.078459, + 0.0, + 3.86455, + -0.078459, + -0.996917, + 0.0, + 2.19852, + 0.0, + 0.0, + 1.0, + -0.033268, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0580_00": [ + -0.267238, + 0.96363, + 0.0, + -1.32846, + -0.96363, + -0.267238, + 0.0, + 2.7503, + 0.0, + 0.0, + 1.0, + -0.892079, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0580_01": [ + -0.809017, + 0.587785, + 0.0, + 0.322301, + -0.587785, + -0.809017, + 0.0, + 3.86432, + 0.0, + 0.0, + 1.0, + -0.981629, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0581_00": [ + -0.078459, + 0.996917, + 0.0, + -1.79384, + -0.996917, + -0.078459, + 0.0, + 5.10522, + 0.0, + 0.0, + 1.0, + -0.106674, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0581_01": [ + -0.507538, + 0.861629, + 0.0, + 0.342234, + -0.861629, + -0.507538, + 0.0, + 5.36232, + 0.0, + 0.0, + 1.0, + -0.09907, + 0.0, + 
0.0, + 0.0, + 1.0 + ], + "scene0581_02": [ + -0.224951, + 0.97437, + 0.0, + -1.10438, + -0.97437, + -0.224951, + 0.0, + 5.44992, + 0.0, + 0.0, + 1.0, + -0.08516, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0582_00": [ + 0.358368, + 0.93358, + 0.0, + -4.2788, + -0.93358, + 0.358368, + 0.0, + 3.05107, + 0.0, + 0.0, + 1.0, + -0.048892, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0582_01": [ + -0.838671, + 0.544639, + 0.0, + 0.715151, + -0.544639, + -0.838671, + 0.0, + 3.97649, + 0.0, + 0.0, + 1.0, + -0.04512, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0582_02": [ + 0.989016, + 0.147809, + 0.0, + -2.42739, + -0.147809, + 0.989016, + 0.0, + -3.37996, + 0.0, + 0.0, + 1.0, + -0.059909, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0583_00": [ + -0.017452, + 0.999848, + 0.0, + -1.77876, + -0.999848, + -0.017452, + 0.0, + 2.3151, + 0.0, + 0.0, + 1.0, + -1.09363, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0583_01": [ + 0.987688, + 0.156434, + 0.0, + -1.88127, + -0.156434, + 0.987688, + 0.0, + -0.987546, + 0.0, + 0.0, + 1.0, + -0.112958, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0583_02": [ + -0.165048, + 0.986286, + 0.0, + -1.30969, + -0.986286, + -0.165048, + 0.0, + 1.898, + 0.0, + 0.0, + 1.0, + -0.126945, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0584_00": [ + 0.139173, + 0.990268, + 0.0, + -3.04734, + -0.990268, + 0.139173, + 0.0, + 6.43581, + 0.0, + 0.0, + 1.0, + -0.047948, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0584_01": [ + -0.156434, + 0.987688, + 0.0, + -1.32293, + -0.987688, + -0.156434, + 0.0, + 5.76714, + 0.0, + 0.0, + 1.0, + -0.079686, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0584_02": [ + -0.069757, + 0.997564, + 0.0, + -1.35252, + -0.997564, + -0.069757, + 0.0, + 3.77404, + 0.0, + 0.0, + 1.0, + -0.042376, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0585_00": [ + -0.430511, + 0.902585, + 0.0, + -0.62104, + -0.902585, + -0.430511, + 0.0, + 4.48396, + 0.0, + 0.0, + 1.0, + -0.120271, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0585_01": [ + -0.069757, + 0.997564, + 0.0, + -3.86803, + -0.997564, + -0.069757, + 0.0, + 2.03058, + 0.0, + 0.0, + 1.0, + -0.107154, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0586_00": [ + -0.069757, + 0.997564, + 0.0, + -1.0323, + -0.997564, + -0.069757, + 0.0, + 2.306, + 0.0, + 0.0, + 1.0, + -0.028746, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0586_01": [ + -0.0, + 1.0, + 0.0, + -1.33688, + -1.0, + -0.0, + 0.0, + 4.54121, + 0.0, + 0.0, + 1.0, + -0.046751, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0586_02": [ + -0.078459, + 0.996917, + 0.0, + -1.06538, + -0.996917, + -0.078459, + 0.0, + 4.24626, + 0.0, + 0.0, + 1.0, + -0.076414, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0587_00": [ + 0.71934, + 0.694658, + 0.0, + -4.55115, + -0.694658, + 0.71934, + 0.0, + 0.483305, + 0.0, + 0.0, + 1.0, + -0.026211, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0587_01": [ + 0.978148, + 0.207912, + 0.0, + -4.0762, + -0.207912, + 0.978148, + 0.0, + -2.89758, + 0.0, + 0.0, + 1.0, + -0.023631, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0587_02": [ + 0.350207, + 0.936672, + 0.0, + -4.81046, + -0.936672, + 0.350207, + 0.0, + 3.68807, + 0.0, + 0.0, + 1.0, + -0.047616, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0587_03": [ + -0.948324, + 0.317305, + 0.0, + 3.56485, + -0.317305, + -0.948324, + 0.0, + 5.79822, + 0.0, + 0.0, + 1.0, + -0.026197, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0588_00": [ + -0.996917, + 0.078459, + 0.0, + 4.04708, + -0.078459, + -0.996917, + 0.0, + 6.31382, + 0.0, + 0.0, + 1.0, + -0.082224, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0588_01": [ + 0.990268, + 0.139173, + 0.0, + -6.50252, + -0.139173, + 0.990268, + 0.0, + 
-5.2022, + 0.0, + 0.0, + 1.0, + -0.072561, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0588_02": [ + 0.71934, + 0.694658, + 0.0, + -8.77118, + -0.694658, + 0.71934, + 0.0, + 0.206728, + 0.0, + 0.0, + 1.0, + -0.259236, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0588_03": [ + -0.91706, + 0.398749, + 0.0, + 2.76426, + -0.398749, + -0.91706, + 0.0, + 8.18733, + 0.0, + 0.0, + 1.0, + -0.372099, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0589_00": [ + 0.898794, + 0.438371, + 0.0, + -3.59389, + -0.438371, + 0.898794, + 0.0, + -1.52755, + 0.0, + 0.0, + 1.0, + -0.086327, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0589_01": [ + -0.147809, + 0.989016, + 0.0, + -1.39477, + -0.989016, + -0.147809, + 0.0, + 3.22172, + 0.0, + 0.0, + 1.0, + -0.056877, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0589_02": [ + 0.694658, + 0.71934, + 0.0, + -3.8247, + -0.71934, + 0.694658, + 0.0, + 0.553368, + 0.0, + 0.0, + 1.0, + -0.104675, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0590_00": [ + -0.325568, + 0.945519, + 0.0, + -1.46916, + -0.945519, + -0.325568, + 0.0, + 5.2932, + 0.0, + 0.0, + 1.0, + -0.085132, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0590_01": [ + -0.559193, + 0.829038, + 0.0, + -0.362295, + -0.829038, + -0.559193, + 0.0, + 5.20033, + 0.0, + 0.0, + 1.0, + -0.151666, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0591_00": [ + -0.422618, + 0.906308, + 0.0, + -1.05716, + -0.906308, + -0.422618, + 0.0, + 3.56217, + 0.0, + 0.0, + 1.0, + -0.035947, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0591_01": [ + -0.284015, + 0.95882, + 0.0, + -1.42676, + -0.95882, + -0.284015, + 0.0, + 3.44271, + 0.0, + 0.0, + 1.0, + -0.078707, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0591_02": [ + -0.130526, + 0.991445, + 0.0, + -1.46102, + -0.991445, + -0.130526, + 0.0, + 4.55413, + 0.0, + 0.0, + 1.0, + -0.044922, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0592_00": [ + 0.492424, + 0.870356, + 0.0, + -7.12233, + -0.870356, + 0.492424, + 0.0, + 1.75845, + 0.0, + 0.0, + 1.0, + -0.051801, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0592_01": [ + 0.5373, + 0.843391, + 0.0, + -7.28462, + -0.843391, + 0.5373, + 0.0, + 1.55586, + 0.0, + 0.0, + 1.0, + -0.072863, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0593_00": [ + 0.069757, + 0.997564, + 0.0, + -2.47824, + -0.997564, + 0.069757, + 0.0, + 2.976, + 0.0, + 0.0, + 1.0, + -0.110468, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0593_01": [ + 0.113203, + 0.993572, + 0.0, + -3.08569, + -0.993572, + 0.113203, + 0.0, + 4.73675, + 0.0, + 0.0, + 1.0, + -0.119808, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0594_00": [ + 0.165048, + 0.986286, + 0.0, + -4.99469, + -0.986286, + 0.165048, + 0.0, + 2.05305, + 0.0, + 0.0, + 1.0, + -0.027832, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0595_00": [ + 0.095846, + 0.995396, + 0.0, + -2.08791, + -0.995396, + 0.095846, + 0.0, + 4.00115, + 0.0, + 0.0, + 1.0, + -0.040173, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0596_00": [ + -0.422618, + 0.906308, + 0.0, + -1.73037, + -0.906308, + -0.422618, + 0.0, + 3.8649, + 0.0, + 0.0, + 1.0, + -0.104409, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0596_01": [ + -0.25038, + 0.968148, + 0.0, + -1.90343, + -0.968148, + -0.25038, + 0.0, + 2.99393, + 0.0, + 0.0, + 1.0, + -0.055224, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0596_02": [ + -0.848048, + 0.529919, + 0.0, + 0.596304, + -0.529919, + -0.848048, + 0.0, + 4.10526, + 0.0, + 0.0, + 1.0, + -0.08632, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0597_00": [ + 0.999391, + 0.034899, + 0.0, + -3.23145, + -0.034899, + 0.999391, + 0.0, + -2.82025, + 0.0, + 0.0, + 1.0, + -0.034938, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0597_01": [ + -0.139173, + 
0.990268, + 0.0, + -2.4863, + -0.990268, + -0.139173, + 0.0, + 3.7253, + 0.0, + 0.0, + 1.0, + -0.066289, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0597_02": [ + 0.999048, + 0.043619, + 0.0, + -3.32843, + -0.043619, + 0.999048, + 0.0, + -3.37168, + 0.0, + 0.0, + 1.0, + -0.056199, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0598_00": [ + 0.92388, + 0.382683, + 0.0, + -5.14609, + -0.382683, + 0.92388, + 0.0, + -1.53938, + 0.0, + 0.0, + 1.0, + -0.101174, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0598_01": [ + 0.725374, + 0.688355, + 0.0, + -6.82682, + -0.688355, + 0.725374, + 0.0, + -0.775952, + 0.0, + 0.0, + 1.0, + -0.023576, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0598_02": [ + 0.087156, + 0.996195, + 0.0, + -4.55715, + -0.996195, + 0.087156, + 0.0, + 4.1518, + 0.0, + 0.0, + 1.0, + -0.096583, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0599_00": [ + -0.034899, + 0.999391, + 0.0, + -3.40028, + -0.999391, + -0.034899, + 0.0, + 2.99535, + 0.0, + 0.0, + 1.0, + -0.047506, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0599_01": [ + 0.078459, + 0.996917, + 0.0, + -3.74175, + -0.996917, + 0.078459, + 0.0, + 2.3052, + 0.0, + 0.0, + 1.0, + -0.038957, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0599_02": [ + -0.156434, + 0.987688, + 0.0, + -2.95317, + -0.987688, + -0.156434, + 0.0, + 3.87422, + 0.0, + 0.0, + 1.0, + -0.038063, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0600_00": [ + 0.233445, + 0.97237, + 0.0, + -2.48315, + -0.97237, + 0.233445, + 0.0, + 2.14047, + 0.0, + 0.0, + 1.0, + -0.124858, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0600_01": [ + 0.390731, + 0.920505, + 0.0, + -3.21785, + -0.920505, + 0.390731, + 0.0, + 2.09742, + 0.0, + 0.0, + 1.0, + -0.096225, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0600_02": [ + 0.113203, + 0.993572, + 0.0, + -2.45303, + -0.993572, + 0.113203, + 0.0, + 2.86569, + 0.0, + 0.0, + 1.0, + -0.082829, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0601_00": [ + -0.48481, + 0.87462, + 0.0, + -1.13133, + -0.87462, + -0.48481, + 0.0, + 5.08156, + 0.0, + 0.0, + 1.0, + -0.028024, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0601_01": [ + -0.325568, + 0.945519, + 0.0, + -1.49946, + -0.945519, + -0.325568, + 0.0, + 5.45328, + 0.0, + 0.0, + 1.0, + -0.045373, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0602_00": [ + -0.292372, + 0.956305, + 0.0, + -0.571878, + -0.956305, + -0.292372, + 0.0, + 2.6268, + 0.0, + 0.0, + 1.0, + -0.041551, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0603_00": [ + 0.113203, + 0.993572, + 0.0, + -3.8007, + -0.993572, + 0.113203, + 0.0, + 2.58082, + 0.0, + 0.0, + 1.0, + -0.802339, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0603_01": [ + 0.052336, + 0.99863, + 0.0, + -3.77632, + -0.99863, + 0.052336, + 0.0, + 2.64639, + 0.0, + 0.0, + 1.0, + -1.11813, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0604_00": [ + 0.707107, + 0.707107, + 0.0, + -5.6755, + -0.707107, + 0.707107, + 0.0, + -0.320495, + 0.0, + 0.0, + 1.0, + -0.049941, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0604_01": [ + -0.008727, + 0.999962, + 0.0, + -3.14595, + -0.999962, + -0.008727, + 0.0, + 4.30833, + 0.0, + 0.0, + 1.0, + -0.069352, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0604_02": [ + 0.284015, + 0.95882, + 0.0, + -4.03809, + -0.95882, + 0.284015, + 0.0, + 2.09597, + 0.0, + 0.0, + 1.0, + -0.046513, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0605_00": [ + -0.998135, + 0.061049, + 0.0, + 2.23525, + -0.061049, + -0.998135, + 0.0, + 3.67836, + 0.0, + 0.0, + 1.0, + -0.274089, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0605_01": [ + -0.866025, + 0.5, + 0.0, + 1.56844, + -0.5, + -0.866025, + 0.0, + 5.00497, + 0.0, + 0.0, + 1.0, + -0.122757, + 0.0, + 0.0, + 
0.0, + 1.0 + ], + "scene0606_00": [ + 0.970296, + 0.241922, + 0.0, + -3.27039, + -0.241922, + 0.970296, + 0.0, + -4.41551, + 0.0, + 0.0, + 1.0, + -0.065001, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0606_01": [ + 0.956305, + 0.292372, + 0.0, + -3.46606, + -0.292372, + 0.956305, + 0.0, + -4.22241, + 0.0, + 0.0, + 1.0, + -0.102786, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0606_02": [ + -0.814116, + 0.580703, + 0.0, + 2.04773, + -0.580703, + -0.814116, + 0.0, + 5.04649, + 0.0, + 0.0, + 1.0, + -0.108714, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0607_00": [ + -0.97237, + 0.233445, + 0.0, + 1.04118, + -0.233445, + -0.97237, + 0.0, + 4.4266, + 0.0, + 0.0, + 1.0, + -0.047063, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0607_01": [ + -0.945519, + 0.325568, + 0.0, + 0.354171, + -0.325568, + -0.945519, + 0.0, + 4.25694, + 0.0, + 0.0, + 1.0, + -0.032209, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0608_00": [ + -0.992546, + 0.121869, + 0.0, + 1.79062, + -0.121869, + -0.992546, + 0.0, + 5.28396, + 0.0, + 0.0, + 1.0, + -0.165025, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0608_01": [ + 0.573576, + 0.819152, + 0.0, + -4.60779, + -0.819152, + 0.573576, + 0.0, + 0.09138, + 0.0, + 0.0, + 1.0, + -0.145032, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0608_02": [ + 0.979925, + 0.199368, + 0.0, + -3.36287, + -0.199368, + 0.979925, + 0.0, + -3.76859, + 0.0, + 0.0, + 1.0, + -0.189877, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0609_00": [ + 0.043619, + 0.999048, + 0.0, + -1.68477, + -0.999048, + 0.043619, + 0.0, + 2.67506, + 0.0, + 0.0, + 1.0, + -0.031502, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0609_01": [ + -0.087156, + 0.996195, + 0.0, + -1.85885, + -0.996195, + -0.087156, + 0.0, + 4.48699, + 0.0, + 0.0, + 1.0, + -0.032904, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0609_02": [ + -0.71934, + 0.694658, + 0.0, + -0.508627, + -0.694658, + -0.71934, + 0.0, + 7.17727, + 0.0, + 0.0, + 1.0, + -0.056863, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0609_03": [ + 0.566406, + 0.824126, + 0.0, + -3.86051, + -0.824126, + 0.566406, + 0.0, + 0.831495, + 0.0, + 0.0, + 1.0, + -0.059769, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0610_00": [ + -0.017452, + 0.999848, + 0.0, + -0.847024, + -0.999848, + -0.017452, + 0.0, + 1.31614, + 0.0, + 0.0, + 1.0, + -0.040952, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0610_01": [ + -0.052336, + 0.99863, + 0.0, + -1.24332, + -0.99863, + -0.052336, + 0.0, + 1.17578, + 0.0, + 0.0, + 1.0, + -0.040388, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0610_02": [ + 0.034899, + 0.999391, + 0.0, + -2.04536, + -0.999391, + 0.034899, + 0.0, + 1.09723, + 0.0, + 0.0, + 1.0, + -0.043711, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0611_00": [ + -0.580703, + 0.814116, + 0.0, + -1.01662, + -0.814116, + -0.580703, + 0.0, + 4.39734, + 0.0, + 0.0, + 1.0, + -0.126821, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0611_01": [ + 0.566406, + 0.824126, + 0.0, + -4.44287, + -0.824126, + 0.566406, + 0.0, + 1.8944, + 0.0, + 0.0, + 1.0, + -0.051616, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0612_00": [ + 0.25038, + 0.968148, + 0.0, + -3.18402, + -0.968148, + 0.25038, + 0.0, + 1.20615, + 0.0, + 0.0, + 1.0, + -0.107796, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0612_01": [ + 0.446198, + 0.894934, + 0.0, + -3.14595, + -0.894934, + 0.446198, + 0.0, + 0.596956, + 0.0, + 0.0, + 1.0, + -0.053656, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0613_00": [ + 0.956305, + 0.292372, + 0.0, + -3.21023, + -0.292372, + 0.956305, + 0.0, + -3.34244, + 0.0, + 0.0, + 1.0, + -0.12391, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0613_01": [ + -0.104529, + 0.994522, + 0.0, + -1.25332, + -0.994522, + 
-0.104529, + 0.0, + 3.8595, + 0.0, + 0.0, + 1.0, + -0.101085, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0613_02": [ + -0.21644, + 0.976296, + 0.0, + -1.10753, + -0.976296, + -0.21644, + 0.0, + 3.51967, + 0.0, + 0.0, + 1.0, + -0.13279, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0614_00": [ + 0.284015, + 0.95882, + 0.0, + -3.35345, + -0.95882, + 0.284015, + 0.0, + 2.28477, + 0.0, + 0.0, + 1.0, + -0.06653, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0614_01": [ + 0.798636, + 0.601815, + 0.0, + -4.51843, + -0.601815, + 0.798636, + 0.0, + -1.18907, + 0.0, + 0.0, + 1.0, + -0.103691, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0614_02": [ + -0.97437, + 0.224951, + 0.0, + 1.50798, + -0.224951, + -0.97437, + 0.0, + 4.07561, + 0.0, + 0.0, + 1.0, + -0.121269, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0615_00": [ + 0.173648, + 0.984808, + 0.0, + -4.52156, + -0.984808, + 0.173648, + 0.0, + 1.59856, + 0.0, + 0.0, + 1.0, + -0.121807, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0615_01": [ + 0.121869, + 0.992546, + 0.0, + -3.51831, + -0.992546, + 0.121869, + 0.0, + 1.58896, + 0.0, + 0.0, + 1.0, + -0.09779, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0616_00": [ + -0.983255, + 0.182236, + 0.0, + 1.61427, + -0.182236, + -0.983255, + 0.0, + 2.89867, + 0.0, + 0.0, + 1.0, + -0.044895, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0616_01": [ + 0.75471, + 0.656059, + 0.0, + -4.28281, + -0.656059, + 0.75471, + 0.0, + -0.655593, + 0.0, + 0.0, + 1.0, + -0.04795, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0617_00": [ + 0.71325, + 0.700909, + 0.0, + -4.77674, + -0.700909, + 0.71325, + 0.0, + 0.537272, + 0.0, + 0.0, + 1.0, + -0.071018, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0618_00": [ + -0.793353, + 0.608761, + 0.0, + 0.707891, + -0.608761, + -0.793353, + 0.0, + 5.89118, + 0.0, + 0.0, + 1.0, + -0.054273, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0619_00": [ + 0.147809, + 0.989016, + 0.0, + -2.67379, + -0.989016, + 0.147809, + 0.0, + 2.64766, + 0.0, + 0.0, + 1.0, + -0.029502, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0620_00": [ + 0.461749, + 0.887011, + 0.0, + -2.32755, + -0.887011, + 0.461749, + 0.0, + 0.827216, + 0.0, + 0.0, + 1.0, + -0.043763, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0620_01": [ + -0.139173, + 0.990268, + 0.0, + -1.79247, + -0.990268, + -0.139173, + 0.0, + 1.59576, + 0.0, + 0.0, + 1.0, + -0.031849, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0621_00": [ + -0.366501, + 0.930418, + 0.0, + -1.55484, + -0.930418, + -0.366501, + 0.0, + 4.67985, + 0.0, + 0.0, + 1.0, + -0.09829, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0622_00": [ + 0.026177, + 0.999657, + 0.0, + -4.30812, + -0.999657, + 0.026177, + 0.0, + 3.98275, + 0.0, + 0.0, + 1.0, + -0.047081, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0622_01": [ + 0.069757, + 0.997564, + 0.0, + -4.24018, + -0.997564, + 0.069757, + 0.0, + 2.74169, + 0.0, + 0.0, + 1.0, + -0.055483, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0623_00": [ + -0.743145, + 0.669131, + 0.0, + -0.489994, + -0.669131, + -0.743145, + 0.0, + 4.90762, + 0.0, + 0.0, + 1.0, + -0.076168, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0623_01": [ + 0.902585, + 0.430511, + 0.0, + -3.4205, + -0.430511, + 0.902585, + 0.0, + -1.75267, + 0.0, + 0.0, + 1.0, + -0.044722, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0624_00": [ + -0.920505, + 0.390731, + 0.0, + 1.78389, + -0.390731, + -0.920505, + 0.0, + 5.99228, + 0.0, + 0.0, + 1.0, + -0.127575, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0625_00": [ + 0.998135, + 0.061049, + 0.0, + -1.1384, + -0.061049, + 0.998135, + 0.0, + -0.748359, + 0.0, + 0.0, + 1.0, + -0.060803, + 0.0, + 0.0, + 0.0, + 1.0 + ], + 
"scene0625_01": [ + -0.0, + 1.0, + 0.0, + -0.709055, + -1.0, + -0.0, + 0.0, + 0.956108, + 0.0, + 0.0, + 1.0, + -0.055384, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0626_00": [ + -0.857167, + 0.515038, + 0.0, + 0.995046, + -0.515038, + -0.857167, + 0.0, + 3.56396, + 0.0, + 0.0, + 1.0, + -0.038546, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0626_01": [ + -0.438371, + 0.898794, + 0.0, + -0.768953, + -0.898794, + -0.438371, + 0.0, + 3.40289, + 0.0, + 0.0, + 1.0, + -0.042935, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0626_02": [ + 0.25038, + 0.968148, + 0.0, + -3.42728, + -0.968148, + 0.25038, + 0.0, + 2.93313, + 0.0, + 0.0, + 1.0, + -0.038082, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0627_00": [ + 0.656059, + 0.75471, + 0.0, + -7.52151, + -0.75471, + 0.656059, + 0.0, + 0.384957, + 0.0, + 0.0, + 1.0, + -0.05272, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0627_01": [ + -0.095846, + 0.995396, + 0.0, + -2.15212, + -0.995396, + -0.095846, + 0.0, + 4.42762, + 0.0, + 0.0, + 1.0, + -0.068782, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0628_00": [ + 0.147809, + 0.989016, + 0.0, + -2.14772, + -0.989016, + 0.147809, + 0.0, + 2.02372, + 0.0, + 0.0, + 1.0, + -0.037842, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0628_01": [ + 0.156434, + 0.987688, + 0.0, + -2.36515, + -0.987688, + 0.156434, + 0.0, + 2.29071, + 0.0, + 0.0, + 1.0, + -0.020755, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0628_02": [ + 0.173648, + 0.984808, + 0.0, + -2.2987, + -0.984808, + 0.173648, + 0.0, + 2.14796, + 0.0, + 0.0, + 1.0, + -0.018928, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0629_00": [ + -0.71325, + 0.700909, + 0.0, + 0.042756, + -0.700909, + -0.71325, + 0.0, + 4.76551, + 0.0, + 0.0, + 1.0, + -0.559411, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0629_01": [ + -0.061049, + 0.998135, + 0.0, + -2.13374, + -0.998135, + -0.061049, + 0.0, + 3.10332, + 0.0, + 0.0, + 1.0, + -0.618592, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0629_02": [ + 0.008727, + 0.999962, + 0.0, + -2.26252, + -0.999962, + 0.008727, + 0.0, + 2.8186, + 0.0, + 0.0, + 1.0, + -0.527068, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0630_00": [ + 0.156434, + 0.987688, + 0.0, + -2.21078, + -0.987688, + 0.156434, + 0.0, + 1.74447, + 0.0, + 0.0, + 1.0, + -0.106912, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0630_01": [ + -0.190809, + 0.981627, + 0.0, + -1.1306, + -0.981627, + -0.190809, + 0.0, + 3.09303, + 0.0, + 0.0, + 1.0, + -0.074128, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0630_02": [ + -0.026177, + 0.999657, + 0.0, + -1.34375, + -0.999657, + -0.026177, + 0.0, + 2.63043, + 0.0, + 0.0, + 1.0, + -0.081329, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0630_03": [ + 0.156434, + 0.987688, + 0.0, + -1.95367, + -0.987688, + 0.156434, + 0.0, + 2.67128, + 0.0, + 0.0, + 1.0, + -0.116498, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0630_04": [ + 0.104528, + 0.994522, + 0.0, + -1.71924, + -0.994522, + 0.104528, + 0.0, + 2.92096, + 0.0, + 0.0, + 1.0, + -0.156685, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0630_05": [ + 0.422618, + 0.906308, + 0.0, + -3.69574, + -0.906308, + 0.422618, + 0.0, + 2.22939, + 0.0, + 0.0, + 1.0, + -0.070884, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0630_06": [ + -0.113203, + 0.993572, + 0.0, + -1.17875, + -0.993572, + -0.113203, + 0.0, + 4.06828, + 0.0, + 0.0, + 1.0, + -0.091226, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0631_00": [ + -0.130526, + 0.991445, + 0.0, + -4.05376, + -0.991445, + -0.130526, + 0.0, + 4.08825, + 0.0, + 0.0, + 1.0, + -0.070688, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0631_01": [ + 0.766044, + 0.642788, + 0.0, + -4.81116, + -0.642788, + 0.766044, + 0.0, + -1.2996, + 0.0, + 0.0, + 
1.0, + -0.037736, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0631_02": [ + -0.026177, + 0.999657, + 0.0, + -2.94742, + -0.999657, + -0.026177, + 0.0, + 2.10512, + 0.0, + 0.0, + 1.0, + -0.085389, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0632_00": [ + 0.113203, + 0.993572, + 0.0, + -2.84272, + -0.993572, + 0.113203, + 0.0, + 2.93186, + 0.0, + 0.0, + 1.0, + -0.035355, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0633_00": [ + 0.430511, + 0.902585, + 0.0, + -2.71148, + -0.902585, + 0.430511, + 0.0, + 0.715545, + 0.0, + 0.0, + 1.0, + -0.052433, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0633_01": [ + 0.207912, + 0.978148, + 0.0, + -2.69838, + -0.978148, + 0.207912, + 0.0, + 0.894511, + 0.0, + 0.0, + 1.0, + -0.068592, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0634_00": [ + 0.737277, + 0.67559, + 0.0, + -5.51876, + -0.67559, + 0.737277, + 0.0, + 0.09904, + 0.0, + 0.0, + 1.0, + -0.083367, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0635_00": [ + 0.034899, + 0.999391, + 0.0, + -1.70829, + -0.999391, + 0.034899, + 0.0, + 2.06169, + 0.0, + 0.0, + 1.0, + -0.278353, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0635_01": [ + 0.026177, + 0.999657, + 0.0, + -1.61724, + -0.999657, + 0.026177, + 0.0, + 2.05494, + 0.0, + 0.0, + 1.0, + -0.272143, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0636_00": [ + -0.95882, + 0.284015, + 0.0, + 0.633699, + -0.284015, + -0.95882, + 0.0, + 1.22229, + 0.0, + 0.0, + 1.0, + -0.238494, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0637_00": [ + -0.848048, + 0.529919, + 0.0, + 0.150983, + -0.529919, + -0.848048, + 0.0, + 4.28938, + 0.0, + 0.0, + 1.0, + -0.056736, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0638_00": [ + 0.309017, + 0.951057, + 0.0, + -4.12305, + -0.951057, + 0.309017, + 0.0, + 2.47404, + 0.0, + 0.0, + 1.0, + -0.039579, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0639_00": [ + -0.224951, + 0.97437, + 0.0, + -2.01629, + -0.97437, + -0.224951, + 0.0, + 2.12159, + 0.0, + 0.0, + 1.0, + -0.220042, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0640_00": [ + -0.999848, + 0.017453, + 0.0, + 1.95251, + -0.017453, + -0.999848, + 0.0, + 4.18108, + 0.0, + 0.0, + 1.0, + -0.033473, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0640_01": [ + -0.121869, + 0.992546, + 0.0, + -2.24641, + -0.992546, + -0.121869, + 0.0, + 4.34503, + 0.0, + 0.0, + 1.0, + -0.054783, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0640_02": [ + -0.061049, + 0.998135, + 0.0, + -2.39389, + -0.998135, + -0.061049, + 0.0, + 4.23276, + 0.0, + 0.0, + 1.0, + -0.028179, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0641_00": [ + 0.515038, + 0.857167, + 0.0, + -5.85591, + -0.857167, + 0.515038, + 0.0, + 0.633349, + 0.0, + 0.0, + 1.0, + -0.081465, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0642_00": [ + 0.078459, + 0.996917, + 0.0, + -2.32851, + -0.996917, + 0.078459, + 0.0, + 4.42266, + 0.0, + 0.0, + 1.0, + -0.138936, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0642_01": [ + 0.095846, + 0.995396, + 0.0, + -2.52513, + -0.995396, + 0.095846, + 0.0, + 4.10343, + 0.0, + 0.0, + 1.0, + -0.105415, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0642_02": [ + 0.788011, + 0.615662, + 0.0, + -4.6872, + -0.615662, + 0.788011, + 0.0, + -1.43388, + 0.0, + 0.0, + 1.0, + -0.082915, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0642_03": [ + -0.622515, + 0.782608, + 0.0, + -0.125012, + -0.782608, + -0.622515, + 0.0, + 4.45317, + 0.0, + 0.0, + 1.0, + -0.078125, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0643_00": [ + -0.241922, + 0.970296, + 0.0, + -1.15246, + -0.970296, + -0.241922, + 0.0, + 4.77533, + 0.0, + 0.0, + 1.0, + -0.046977, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0644_00": [ + 0.559193, + 0.829038, + 
0.0, + -6.42676, + -0.829038, + 0.559193, + 0.0, + 1.2216, + 0.0, + 0.0, + 1.0, + -0.071995, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0645_00": [ + 0.66262, + 0.748956, + 0.0, + -6.21777, + -0.748956, + 0.66262, + 0.0, + -0.025965, + 0.0, + 0.0, + 1.0, + -0.054875, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0645_01": [ + 0.438371, + 0.898794, + 0.0, + -5.42221, + -0.898794, + 0.438371, + 0.0, + 2.04957, + 0.0, + 0.0, + 1.0, + -0.045398, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0645_02": [ + 0.469472, + 0.882948, + 0.0, + -5.39099, + -0.882948, + 0.469472, + 0.0, + 1.24582, + 0.0, + 0.0, + 1.0, + -0.03118, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0646_00": [ + 0.994522, + 0.104528, + 0.0, + -3.42666, + -0.104528, + 0.994522, + 0.0, + -3.79479, + 0.0, + 0.0, + 1.0, + -1.35377, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0646_01": [ + -0.993572, + 0.113203, + 0.0, + 2.32671, + -0.113203, + -0.993572, + 0.0, + 4.38086, + 0.0, + 0.0, + 1.0, + -1.44937, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0646_02": [ + 0.983255, + 0.182236, + 0.0, + -3.26296, + -0.182236, + 0.983255, + 0.0, + -3.36766, + 0.0, + 0.0, + 1.0, + -1.72703, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0647_00": [ + -0.636078, + 0.771625, + 0.0, + -0.32816, + -0.771625, + -0.636078, + 0.0, + 5.64387, + 0.0, + 0.0, + 1.0, + -0.083577, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0647_01": [ + -0.976296, + 0.21644, + 0.0, + 1.93151, + -0.21644, + -0.976296, + 0.0, + 5.02957, + 0.0, + 0.0, + 1.0, + -0.063276, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0648_00": [ + -0.414693, + 0.909961, + 0.0, + -0.931786, + -0.909961, + -0.414693, + 0.0, + 2.9269, + 0.0, + 0.0, + 1.0, + -0.112798, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0648_01": [ + -0.414693, + 0.909961, + 0.0, + -0.943282, + -0.909961, + -0.414693, + 0.0, + 2.91857, + 0.0, + 0.0, + 1.0, + -0.121954, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0649_00": [ + -0.034899, + 0.999391, + 0.0, + -0.937478, + -0.999391, + -0.034899, + 0.0, + 1.6641, + 0.0, + 0.0, + 1.0, + -0.172781, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0649_01": [ + -0.043619, + 0.999048, + 0.0, + -0.9229, + -0.999048, + -0.043619, + 0.0, + 1.67728, + 0.0, + 0.0, + 1.0, + -0.225625, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0650_00": [ + 0.406737, + 0.913545, + 0.0, + -3.83156, + -0.913545, + 0.406737, + 0.0, + 1.88333, + 0.0, + 0.0, + 1.0, + -0.155621, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0651_00": [ + 0.275637, + 0.961262, + 0.0, + -3.77431, + -0.961262, + 0.275637, + 0.0, + 1.52941, + 0.0, + 0.0, + 1.0, + -0.075916, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0651_01": [ + 0.113203, + 0.993572, + 0.0, + -2.95664, + -0.993572, + 0.113203, + 0.0, + 1.97154, + 0.0, + 0.0, + 1.0, + -0.068743, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0651_02": [ + 0.857167, + 0.515038, + 0.0, + -3.86136, + -0.515038, + 0.857167, + 0.0, + -2.36553, + 0.0, + 0.0, + 1.0, + -0.046222, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0652_00": [ + -0.095846, + 0.995396, + 0.0, + -3.75229, + -0.995396, + -0.095846, + 0.0, + 3.00376, + 0.0, + 0.0, + 1.0, + -0.046831, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0653_00": [ + -0.156434, + 0.987688, + 0.0, + -2.69776, + -0.987688, + -0.156434, + 0.0, + 3.07016, + 0.0, + 0.0, + 1.0, + -0.140299, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0653_01": [ + 0.358368, + 0.93358, + 0.0, + -4.21235, + -0.93358, + 0.358368, + 0.0, + 2.25478, + 0.0, + 0.0, + 1.0, + -0.125348, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0654_00": [ + -0.069757, + 0.997564, + 0.0, + -2.12681, + -0.997564, + -0.069757, + 0.0, + 3.06147, + 0.0, + 0.0, + 1.0, + -0.0843, + 0.0, + 
0.0, + 0.0, + 1.0 + ], + "scene0654_01": [ + -0.998135, + 0.061049, + 0.0, + 1.80441, + -0.061049, + -0.998135, + 0.0, + 3.90224, + 0.0, + 0.0, + 1.0, + -0.130376, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0655_00": [ + 0.996195, + 0.087156, + 0.0, + -3.42842, + -0.087156, + 0.996195, + 0.0, + -2.87594, + 0.0, + 0.0, + 1.0, + -0.082609, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0655_01": [ + -0.601815, + 0.798635, + 0.0, + -0.95255, + -0.798635, + -0.601815, + 0.0, + 4.18263, + 0.0, + 0.0, + 1.0, + -0.101045, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0655_02": [ + 0.913545, + 0.406737, + 0.0, + -4.42937, + -0.406737, + 0.913545, + 0.0, + -2.38261, + 0.0, + 0.0, + 1.0, + -0.052763, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0656_00": [ + -0.998135, + 0.061049, + 0.0, + 1.69124, + -0.061049, + -0.998135, + 0.0, + 2.1893, + 0.0, + 0.0, + 1.0, + -0.07791, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0656_01": [ + -0.998135, + 0.061049, + 0.0, + 1.4333, + -0.061049, + -0.998135, + 0.0, + 2.62295, + 0.0, + 0.0, + 1.0, + -0.09267, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0656_02": [ + 0.061049, + 0.998135, + 0.0, + -3.42489, + -0.998135, + 0.061049, + 0.0, + 2.42265, + 0.0, + 0.0, + 1.0, + -0.133664, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0656_03": [ + -0.0, + 1.0, + 0.0, + -3.4031, + -1.0, + -0.0, + 0.0, + 2.02713, + 0.0, + 0.0, + 1.0, + -0.090588, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0657_00": [ + 0.008727, + 0.999962, + 0.0, + -2.53265, + -0.999962, + 0.008727, + 0.0, + 3.71519, + 0.0, + 0.0, + 1.0, + -0.06603, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0658_00": [ + 0.275637, + 0.961262, + 0.0, + -3.01062, + -0.961262, + 0.275637, + 0.0, + 2.06795, + 0.0, + 0.0, + 1.0, + -0.041388, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0659_00": [ + -0.976296, + 0.21644, + 0.0, + 0.560831, + -0.21644, + -0.976296, + 0.0, + 1.11185, + 0.0, + 0.0, + 1.0, + -0.051297, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0659_01": [ + 0.999657, + 0.026177, + 0.0, + -1.18389, + -0.026177, + 0.999657, + 0.0, + -1.51457, + 0.0, + 0.0, + 1.0, + -0.031283, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0660_00": [ + -0.649448, + 0.760406, + 0.0, + -0.064755, + -0.760406, + -0.649448, + 0.0, + 4.35347, + 0.0, + 0.0, + 1.0, + -0.0516, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0661_00": [ + 0.453991, + 0.891007, + 0.0, + -5.33135, + -0.891007, + 0.453991, + 0.0, + 2.11142, + 0.0, + 0.0, + 1.0, + -0.163449, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0662_00": [ + -0.008727, + 0.999962, + 0.0, + -0.69048, + -0.999962, + -0.008727, + 0.0, + 1.21705, + 0.0, + 0.0, + 1.0, + -0.074993, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0662_01": [ + -0.008727, + 0.999962, + 0.0, + -0.719316, + -0.999962, + -0.008727, + 0.0, + 3.5671, + 0.0, + 0.0, + 1.0, + -0.0715, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0662_02": [ + 0.017452, + 0.999848, + 0.0, + -0.737608, + -0.999848, + 0.017452, + 0.0, + 1.78623, + 0.0, + 0.0, + 1.0, + -0.050217, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0663_00": [ + -0.207912, + 0.978148, + 0.0, + -1.24521, + -0.978148, + -0.207912, + 0.0, + 2.9398, + 0.0, + 0.0, + 1.0, + -0.12696, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0663_01": [ + 0.942641, + 0.333807, + 0.0, + -3.20082, + -0.333807, + 0.942641, + 0.0, + -2.64887, + 0.0, + 0.0, + 1.0, + -0.151938, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0663_02": [ + 0.566406, + 0.824126, + 0.0, + -3.51338, + -0.824126, + 0.566406, + 0.0, + 0.803752, + 0.0, + 0.0, + 1.0, + -0.152579, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0664_00": [ + -0.130526, + 0.991445, + 0.0, + -1.24855, + -0.991445, + -0.130526, + 0.0, + 
1.70253, + 0.0, + 0.0, + 1.0, + -0.038416, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0664_01": [ + -0.21644, + 0.976296, + 0.0, + -1.05125, + -0.976296, + -0.21644, + 0.0, + 1.6443, + 0.0, + 0.0, + 1.0, + -0.038906, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0664_02": [ + 0.139173, + 0.990268, + 0.0, + -1.40426, + -0.990268, + 0.139173, + 0.0, + 3.26597, + 0.0, + 0.0, + 1.0, + -0.029179, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0665_00": [ + 0.382683, + 0.92388, + 0.0, + -5.448, + -0.92388, + 0.382683, + 0.0, + 2.6298, + 0.0, + 0.0, + 1.0, + -0.1835, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0665_01": [ + 0.292372, + 0.956305, + 0.0, + -6.35175, + -0.956305, + 0.292372, + 0.0, + 2.75655, + 0.0, + 0.0, + 1.0, + -0.284697, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0666_00": [ + -0.173648, + 0.984808, + 0.0, + -2.85084, + -0.984808, + -0.173648, + 0.0, + 5.92318, + 0.0, + 0.0, + 1.0, + -0.083883, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0666_01": [ + -0.121869, + 0.992546, + 0.0, + -3.15576, + -0.992546, + -0.121869, + 0.0, + 5.96704, + 0.0, + 0.0, + 1.0, + -0.112934, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0666_02": [ + 0.994522, + 0.104528, + 0.0, + -3.68362, + -0.104528, + 0.994522, + 0.0, + -4.9939, + 0.0, + 0.0, + 1.0, + -0.122927, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0667_00": [ + -0.748956, + 0.66262, + 0.0, + 0.140274, + -0.66262, + -0.748956, + 0.0, + 8.63666, + 0.0, + 0.0, + 1.0, + -0.128061, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0667_01": [ + -0.008727, + 0.999962, + 0.0, + -3.72756, + -0.999962, + -0.008727, + 0.0, + 5.65807, + 0.0, + 0.0, + 1.0, + -0.226795, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0667_02": [ + -0.48481, + 0.87462, + 0.0, + -1.96486, + -0.87462, + -0.48481, + 0.0, + 7.82878, + 0.0, + 0.0, + 1.0, + -0.186957, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0668_00": [ + 0.688355, + 0.725374, + 0.0, + -5.31107, + -0.725374, + 0.688355, + 0.0, + -0.533174, + 0.0, + 0.0, + 1.0, + -0.16382, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0669_00": [ + -0.91706, + 0.398749, + 0.0, + 0.300111, + -0.398749, + -0.91706, + 0.0, + 6.20605, + 0.0, + 0.0, + 1.0, + -0.226055, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0669_01": [ + -0.390731, + 0.920505, + 0.0, + -1.49619, + -0.920505, + -0.390731, + 0.0, + 5.86103, + 0.0, + 0.0, + 1.0, + -0.100011, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0670_00": [ + -0.087156, + 0.996195, + 0.0, + -1.59085, + -0.996195, + -0.087156, + 0.0, + 2.53656, + 0.0, + 0.0, + 1.0, + -0.104031, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0670_01": [ + -0.292372, + 0.956305, + 0.0, + -1.86697, + -0.956305, + -0.292372, + 0.0, + 4.42023, + 0.0, + 0.0, + 1.0, + -0.150729, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0671_00": [ + 0.92388, + 0.382683, + 0.0, + -4.42246, + -0.382683, + 0.92388, + 0.0, + -2.42699, + 0.0, + 0.0, + 1.0, + -0.045466, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0671_01": [ + 0.953717, + 0.300706, + 0.0, + -2.68677, + -0.300706, + 0.953717, + 0.0, + -2.13942, + 0.0, + 0.0, + 1.0, + -0.026837, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0672_00": [ + 0.422618, + 0.906308, + 0.0, + -6.43722, + -0.906308, + 0.422618, + 0.0, + 2.00868, + 0.0, + 0.0, + 1.0, + -0.071102, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0672_01": [ + -0.990268, + 0.139173, + 0.0, + 4.10015, + -0.139173, + -0.990268, + 0.0, + 4.80755, + 0.0, + 0.0, + 1.0, + -0.106053, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0673_00": [ + -0.902585, + 0.430511, + 0.0, + 1.68622, + -0.430511, + -0.902585, + 0.0, + 5.43248, + 0.0, + 0.0, + 1.0, + -0.128879, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0673_01": [ + -0.824126, + 
0.566406, + 0.0, + 1.17123, + -0.566406, + -0.824126, + 0.0, + 5.79139, + 0.0, + 0.0, + 1.0, + -0.133955, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0673_02": [ + 0.936672, + 0.350207, + 0.0, + -4.74657, + -0.350207, + 0.936672, + 0.0, + -3.3331, + 0.0, + 0.0, + 1.0, + -0.129799, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0673_03": [ + -0.241922, + 0.970296, + 0.0, + -2.15208, + -0.970296, + -0.241922, + 0.0, + 3.14653, + 0.0, + 0.0, + 1.0, + -0.076745, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0673_04": [ + -0.979925, + 0.199368, + 0.0, + 2.94432, + -0.199368, + -0.979925, + 0.0, + 5.33239, + 0.0, + 0.0, + 1.0, + -0.138984, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0673_05": [ + 0.93358, + 0.358368, + 0.0, + -5.32826, + -0.358368, + 0.93358, + 0.0, + -2.76577, + 0.0, + 0.0, + 1.0, + -0.229407, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0674_00": [ + -0.993572, + 0.113203, + 0.0, + 2.45476, + -0.113203, + -0.993572, + 0.0, + 2.33513, + 0.0, + 0.0, + 1.0, + -0.531739, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0674_01": [ + 0.008727, + 0.999962, + 0.0, + -2.82556, + -0.999962, + 0.008727, + 0.0, + 1.70119, + 0.0, + 0.0, + 1.0, + -0.063988, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0675_00": [ + -0.147809, + 0.989016, + 0.0, + -1.3698, + -0.989016, + -0.147809, + 0.0, + 3.56135, + 0.0, + 0.0, + 1.0, + -0.039602, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0675_01": [ + 0.951057, + 0.309017, + 0.0, + -2.89195, + -0.309017, + 0.951057, + 0.0, + -1.15381, + 0.0, + 0.0, + 1.0, + -0.023879, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0676_00": [ + -0.113203, + 0.993572, + 0.0, + -0.711584, + -0.993572, + -0.113203, + 0.0, + 4.20475, + 0.0, + 0.0, + 1.0, + -0.021542, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0676_01": [ + 0.241922, + 0.970296, + 0.0, + -1.83845, + -0.970296, + 0.241922, + 0.0, + 0.907752, + 0.0, + 0.0, + 1.0, + -0.091161, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0677_00": [ + -0.061049, + 0.998135, + 0.0, + -1.6328, + -0.998135, + -0.061049, + 0.0, + 3.39758, + 0.0, + 0.0, + 1.0, + -0.069172, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0677_01": [ + -0.017452, + 0.999848, + 0.0, + -1.76865, + -0.999848, + -0.017452, + 0.0, + 3.12171, + 0.0, + 0.0, + 1.0, + -0.088306, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0677_02": [ + 0.25038, + 0.968148, + 0.0, + -3.62995, + -0.968148, + 0.25038, + 0.0, + 3.79514, + 0.0, + 0.0, + 1.0, + -0.073435, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0678_00": [ + 0.477159, + 0.878817, + 0.0, + -8.50529, + -0.878817, + 0.477159, + 0.0, + 0.343391, + 0.0, + 0.0, + 1.0, + -0.067396, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0678_01": [ + 0.788011, + 0.615662, + 0.0, + -7.38254, + -0.615662, + 0.788011, + 0.0, + -1.16429, + 0.0, + 0.0, + 1.0, + -0.172968, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0678_02": [ + 0.782608, + 0.622515, + 0.0, + -7.33623, + -0.622515, + 0.782608, + 0.0, + -1.25481, + 0.0, + 0.0, + 1.0, + -0.051361, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0679_00": [ + -0.113203, + 0.993572, + 0.0, + -1.70785, + -0.993572, + -0.113203, + 0.0, + 2.383, + 0.0, + 0.0, + 1.0, + -0.054763, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0679_01": [ + -0.113203, + 0.993572, + 0.0, + -1.63495, + -0.993572, + -0.113203, + 0.0, + 3.69062, + 0.0, + 0.0, + 1.0, + -0.066138, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0680_00": [ + -0.173648, + 0.984808, + 0.0, + -2.05071, + -0.984808, + -0.173648, + 0.0, + 4.28046, + 0.0, + 0.0, + 1.0, + -0.033778, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0680_01": [ + -0.976296, + 0.21644, + 0.0, + 1.72481, + -0.21644, + -0.976296, + 0.0, + 2.35028, + 0.0, + 0.0, + 1.0, + 
-0.065115, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0681_00": [ + -0.095846, + 0.995396, + 0.0, + -2.88648, + -0.995396, + -0.095846, + 0.0, + 2.1163, + 0.0, + 0.0, + 1.0, + -0.104662, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0682_00": [ + 0.991445, + 0.130526, + 0.0, + -2.59599, + -0.130526, + 0.991445, + 0.0, + -4.11012, + 0.0, + 0.0, + 1.0, + -0.07992, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0683_00": [ + 1.0, + 0.0, + 0.0, + -1.21081, + 0.0, + 1.0, + 0.0, + -1.87821, + 0.0, + 0.0, + 1.0, + -0.184625, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0684_00": [ + 0.034899, + 0.999391, + 0.0, + -1.24903, + -0.999391, + 0.034899, + 0.0, + 1.52998, + 0.0, + 0.0, + 1.0, + -0.042872, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0684_01": [ + 1.0, + 0.0, + 0.0, + -1.1596, + 0.0, + 1.0, + 0.0, + -3.37222, + 0.0, + 0.0, + 1.0, + -0.027973, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0685_00": [ + 0.104528, + 0.994522, + 0.0, + -1.77438, + -0.994522, + 0.104528, + 0.0, + 2.43888, + 0.0, + 0.0, + 1.0, + -0.067187, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0685_01": [ + -0.325568, + 0.945519, + 0.0, + -1.0358, + -0.945519, + -0.325568, + 0.0, + 3.27061, + 0.0, + 0.0, + 1.0, + -0.05206, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0685_02": [ + 0.224951, + 0.97437, + 0.0, + -2.34878, + -0.97437, + 0.224951, + 0.0, + 2.69315, + 0.0, + 0.0, + 1.0, + -0.081361, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0686_00": [ + 0.130526, + 0.991445, + 0.0, + -2.24539, + -0.991445, + 0.130526, + 0.0, + 3.14629, + 0.0, + 0.0, + 1.0, + -0.043508, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0686_01": [ + 0.113203, + 0.993572, + 0.0, + -1.99005, + -0.993572, + 0.113203, + 0.0, + 1.42709, + 0.0, + 0.0, + 1.0, + -0.043258, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0686_02": [ + 0.017452, + 0.999848, + 0.0, + -1.83922, + -0.999848, + 0.017452, + 0.0, + 3.88359, + 0.0, + 0.0, + 1.0, + -0.022387, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0687_00": [ + -0.147809, + 0.989016, + 0.0, + -1.3812, + -0.989016, + -0.147809, + 0.0, + 2.30666, + 0.0, + 0.0, + 1.0, + -0.044378, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0688_00": [ + 0.390731, + 0.920505, + 0.0, + -3.75504, + -0.920505, + 0.390731, + 0.0, + 0.668476, + 0.0, + 0.0, + 1.0, + -0.101237, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0689_00": [ + -0.999391, + 0.034899, + 0.0, + 3.88253, + -0.034899, + -0.999391, + 0.0, + 2.93303, + 0.0, + 0.0, + 1.0, + -0.046659, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0690_00": [ + 0.848048, + 0.529919, + 0.0, + -7.00131, + -0.529919, + 0.848048, + 0.0, + -1.56194, + 0.0, + 0.0, + 1.0, + -0.062151, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0690_01": [ + 0.374607, + 0.927184, + 0.0, + -4.79788, + -0.927184, + 0.374607, + 0.0, + 2.46813, + 0.0, + 0.0, + 1.0, + -0.064568, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0691_00": [ + -0.594823, + 0.803857, + 0.0, + -1.51076, + -0.803857, + -0.594823, + 0.0, + 4.73974, + 0.0, + 0.0, + 1.0, + -0.029963, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0691_01": [ + -0.147809, + 0.989016, + 0.0, + -3.30156, + -0.989016, + -0.147809, + 0.0, + 5.45398, + 0.0, + 0.0, + 1.0, + -0.067462, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0692_00": [ + 0.999048, + 0.043619, + 0.0, + -5.14344, + -0.043619, + 0.999048, + 0.0, + -3.7978, + 0.0, + 0.0, + 1.0, + -0.085536, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0692_01": [ + 0.052336, + 0.99863, + 0.0, + -2.05981, + -0.99863, + 0.052336, + 0.0, + 1.91066, + 0.0, + 0.0, + 1.0, + -0.1268, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0692_02": [ + -0.069757, + 0.997564, + 0.0, + -3.59527, + -0.997564, + -0.069757, + 0.0, + 4.50968, + 
0.0, + 0.0, + 1.0, + -0.063253, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0692_03": [ + -0.104529, + 0.994522, + 0.0, + -3.70083, + -0.994522, + -0.104529, + 0.0, + 4.92324, + 0.0, + 0.0, + 1.0, + -0.078292, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0692_04": [ + 0.095846, + 0.995396, + 0.0, + -1.6792, + -0.995396, + 0.095846, + 0.0, + 2.41382, + 0.0, + 0.0, + 1.0, + -0.838779, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0693_00": [ + 0.061049, + 0.998135, + 0.0, + -2.84909, + -0.998135, + 0.061049, + 0.0, + 1.02378, + 0.0, + 0.0, + 1.0, + -0.040003, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0693_01": [ + 0.052336, + 0.99863, + 0.0, + -2.9554, + -0.99863, + 0.052336, + 0.0, + 1.00171, + 0.0, + 0.0, + 1.0, + -0.059599, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0693_02": [ + -0.113203, + 0.993572, + 0.0, + -2.30075, + -0.993572, + -0.113203, + 0.0, + 3.34708, + 0.0, + 0.0, + 1.0, + -0.048644, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0694_00": [ + -0.300706, + 0.953717, + 0.0, + -1.6902, + -0.953717, + -0.300706, + 0.0, + 5.62277, + 0.0, + 0.0, + 1.0, + -0.046741, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0694_01": [ + -0.147809, + 0.989016, + 0.0, + -1.82798, + -0.989016, + -0.147809, + 0.0, + 5.74443, + 0.0, + 0.0, + 1.0, + -0.134702, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0695_00": [ + 0.113203, + 0.993572, + 0.0, + -2.02844, + -0.993572, + 0.113203, + 0.0, + 2.84706, + 0.0, + 0.0, + 1.0, + -0.076598, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0695_01": [ + 0.333807, + 0.942641, + 0.0, + -2.68095, + -0.942641, + 0.333807, + 0.0, + 1.18859, + 0.0, + 0.0, + 1.0, + -0.072773, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0695_02": [ + 0.165048, + 0.986286, + 0.0, + -2.14238, + -0.986286, + 0.165048, + 0.0, + 1.74448, + 0.0, + 0.0, + 1.0, + -0.077045, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0695_03": [ + 0.594823, + 0.803857, + 0.0, + -3.41438, + -0.803857, + 0.594823, + 0.0, + 0.124547, + 0.0, + 0.0, + 1.0, + -0.106302, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0696_00": [ + 0.793353, + 0.608761, + 0.0, + -5.15734, + -0.608761, + 0.793353, + 0.0, + -1.26944, + 0.0, + 0.0, + 1.0, + -0.098376, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0696_01": [ + -0.999048, + 0.043619, + 0.0, + 2.43337, + -0.043619, + -0.999048, + 0.0, + 4.30846, + 0.0, + 0.0, + 1.0, + -0.165467, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0696_02": [ + 0.622515, + 0.782608, + 0.0, + -5.09051, + -0.782608, + 0.622515, + 0.0, + 0.645514, + 0.0, + 0.0, + 1.0, + -0.063434, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0697_00": [ + 0.190809, + 0.981627, + 0.0, + -3.23849, + -0.981627, + 0.190809, + 0.0, + 2.1276, + 0.0, + 0.0, + 1.0, + -0.122566, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0697_01": [ + -0.891006, + 0.453991, + 0.0, + 1.4482, + -0.453991, + -0.891006, + 0.0, + 4.5501, + 0.0, + 0.0, + 1.0, + -0.13251, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0697_02": [ + -0.414693, + 0.909961, + 0.0, + -0.565237, + -0.909961, + -0.414693, + 0.0, + 4.23828, + 0.0, + 0.0, + 1.0, + -0.220006, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0697_03": [ + -0.422618, + 0.906308, + 0.0, + -0.558677, + -0.906308, + -0.422618, + 0.0, + 4.35113, + 0.0, + 0.0, + 1.0, + -0.122481, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0698_00": [ + -0.350207, + 0.936672, + 0.0, + -1.2285, + -0.936672, + -0.350207, + 0.0, + 3.89309, + 0.0, + 0.0, + 1.0, + -0.055507, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0698_01": [ + 0.21644, + 0.976296, + 0.0, + -2.7483, + -0.976296, + 0.21644, + 0.0, + 3.1888, + 0.0, + 0.0, + 1.0, + -0.03689, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0699_00": [ + 0.258819, + 0.965926, 
+ 0.0, + -3.49521, + -0.965926, + 0.258819, + 0.0, + 2.258, + 0.0, + 0.0, + 1.0, + -0.065463, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0700_00": [ + -0.809017, + 0.587785, + 0.0, + 0.352544, + -0.587785, + -0.809017, + 0.0, + 3.71646, + 0.0, + 0.0, + 1.0, + -0.046252, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0700_01": [ + -0.190809, + 0.981627, + 0.0, + -0.964843, + -0.981627, + -0.190809, + 0.0, + 3.06762, + 0.0, + 0.0, + 1.0, + -0.046133, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0700_02": [ + -0.190809, + 0.981627, + 0.0, + -1.10037, + -0.981627, + -0.190809, + 0.0, + 3.02647, + 0.0, + 0.0, + 1.0, + -0.031642, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0701_00": [ + -0.95882, + 0.284015, + 0.0, + 1.34411, + -0.284015, + -0.95882, + 0.0, + 3.70514, + 0.0, + 0.0, + 1.0, + -0.057107, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0701_01": [ + 0.986286, + 0.165048, + 0.0, + -2.67721, + -0.165048, + 0.986286, + 0.0, + -1.92331, + 0.0, + 0.0, + 1.0, + -0.03805, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0701_02": [ + -0.309017, + 0.951056, + 0.0, + -1.7119, + -0.951056, + -0.309017, + 0.0, + 3.64579, + 0.0, + 0.0, + 1.0, + -0.489568, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0702_00": [ + -0.731354, + 0.681998, + 0.0, + -0.031488, + -0.681998, + -0.731354, + 0.0, + 1.90708, + 0.0, + 0.0, + 1.0, + -0.046659, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0702_01": [ + -0.994522, + 0.104528, + 0.0, + 1.13459, + -0.104528, + -0.994522, + 0.0, + 1.80104, + 0.0, + 0.0, + 1.0, + -0.069529, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0702_02": [ + -0.999962, + 0.008727, + 0.0, + 1.01268, + -0.008727, + -0.999962, + 0.0, + 0.536939, + 0.0, + 0.0, + 1.0, + -0.061014, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0703_00": [ + 0.034899, + 0.999391, + 0.0, + -2.53219, + -0.999391, + 0.034899, + 0.0, + 3.79658, + 0.0, + 0.0, + 1.0, + -0.075175, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0703_01": [ + 0.34202, + 0.939693, + 0.0, + -4.69327, + -0.939693, + 0.34202, + 0.0, + 3.11156, + 0.0, + 0.0, + 1.0, + -0.047374, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0704_00": [ + -0.267238, + 0.96363, + 0.0, + -2.2971, + -0.96363, + -0.267238, + 0.0, + 3.63123, + 0.0, + 0.0, + 1.0, + -0.108627, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0704_01": [ + 0.656059, + 0.75471, + 0.0, + -4.0016, + -0.75471, + 0.656059, + 0.0, + 0.480199, + 0.0, + 0.0, + 1.0, + -0.07405, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0705_00": [ + 0.052336, + 0.99863, + 0.0, + -1.66754, + -0.99863, + 0.052336, + 0.0, + 2.84952, + 0.0, + 0.0, + 1.0, + -0.142896, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0705_01": [ + 0.087156, + 0.996195, + 0.0, + -1.91084, + -0.996195, + 0.087156, + 0.0, + 2.1951, + 0.0, + 0.0, + 1.0, + -0.105708, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0705_02": [ + 0.173648, + 0.984808, + 0.0, + -2.69916, + -0.984808, + 0.173648, + 0.0, + 1.84084, + 0.0, + 0.0, + 1.0, + -0.074887, + 0.0, + 0.0, + 0.0, + 1.0 + ], + "scene0706_00": [ + -0.233445, + 0.97237, + 0.0, + -2.56981, + -0.97237, + -0.233445, + 0.0, + 2.95338, + 0.0, + 0.0, + 1.0, + -0.156213, + 0.0, + 0.0, + 0.0, + 1.0 + ] +} diff --git a/data_preparation/process_all_scan.py b/data_preparation/process_all_scan.py new file mode 100644 index 0000000..7509110 --- /dev/null +++ b/data_preparation/process_all_scan.py @@ -0,0 +1,243 @@ +import json +import os +from argparse import ArgumentParser +from functools import wraps +from typing import Dict, Tuple + +import mmengine +import numpy as np +import torch +from pytorch3d.transforms import euler_angles_to_matrix +from tqdm import tqdm +from utils.data_utils import 
read_annotation_pickle +from utils.mp3d_process import process_mp3d +from utils.pcd_utils import is_inside_box +from utils.scannet_process import process_scannet +from utils.trscan_process import process_3rscan + +es_anno = {} + + +def mmengine_track_func(func): + + @wraps(func) + def wrapped_func(args): + result = func(*args) + return result + + return wrapped_func + + +def create_scene_pcd(es_anno: dict, + pcd_result: Tuple[np.ndarray, np.ndarray, np.ndarray]) \ + -> Tuple[np.ndarray, np.ndarray, + np.ndarray, np.ndarray]: + """Add the EmbodiedScan box annotations to the point cloud data. + + Args: + es_anno (dict): The EmbodiedScan annotation of + the target scan. + pcd_result (Tuple[np.ndarray, np.ndarray, np.ndarray]) : + The raw point cloud data of the scan, consisting of: + (1) aligned point cloud coordinates with shape (n,3). + (2) point cloud colors ([0,1]) with shape (n,3). + (3) labels (unused here). + + Returns: + Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray] : + The processed point cloud data of the scan, consisting of: + (1) aligned point cloud coordinates with shape (n,3). + (2) point cloud colors ([0,1]) with shape (n,3). + (3) point cloud labels with shape (n,1). + (4) point cloud object ids (int) with shape (n,1). + """ + pc, color, label = pcd_result + label = np.ones_like(label) * -100 + instance_ids = np.ones(pc.shape[0], dtype=np.int16) * (-100) + bboxes = es_anno['bboxes'].reshape(-1, 9) + bboxes[:, 3:6] = np.clip(bboxes[:, 3:6], a_min=1e-2, a_max=None) + object_ids = es_anno['object_ids'] + object_types = es_anno['object_types'] # str + sorted_indices = sorted(enumerate(bboxes), + key=lambda x: -np.prod(x[1][3:6])) + # the larger the box, the smaller the index + sorted_indices_list = [index for index, value in sorted_indices] + + bboxes = [bboxes[index] for index in sorted_indices_list] + object_ids = [object_ids[index] for index in sorted_indices_list] + object_types = [object_types[index] for index in sorted_indices_list] + + for box, obj_id, obj_type in zip(bboxes, object_ids, object_types): + obj_type_id = TYPE2INT.get(obj_type, -1) + center, size = box[:3], box[3:6] + + orientation = np.array( + euler_angles_to_matrix(torch.tensor(box[np.newaxis, 6:]), + convention='ZXY')[0]) + + box_pc_mask = is_inside_box(pc, center, size, orientation) + + instance_ids[box_pc_mask] = obj_id + label[box_pc_mask] = obj_type_id + return pc, color, label, instance_ids + + +@mmengine_track_func +def process_one_scan( + scan_id: str, + save_root: str, + scannet_root: str, + mp3d_root: str, + trscan_root: str, + scannet_matrix: Dict[str, np.ndarray], + mp3d_matrix: Dict[str, np.ndarray], + trscan_matrix: Dict[str, np.ndarray], + mp3d_mapping: Dict[str, str], +): + """Process the point cloud of one scan and save it to a pth file. + + The pth file is a tuple of np.ndarray, consisting of: + (1) aligned point cloud coordinates with shape (n,3). + (2) point cloud colors ranging in [0,1] with shape (n,3). + (3) point cloud labels with shape (n,1). + (4) point cloud object ids with shape (n,1). + Args: + scan_id (str): The scan id. + save_root (str): The root path to save the pth file. + scannet_root (str): The path of scannet. + mp3d_root (str): The path of mp3d. + trscan_root (str): The path of 3rscan. + scannet_matrix (dict): The axis-alignment matrices of scannet. + mp3d_matrix (dict): The axis-alignment matrices of mp3d. + trscan_matrix (dict): The axis-alignment matrices of 3rscan. + mp3d_mapping (dict): The mapping dict for mp3d scan id.
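The size-descending sort in create_scene_pcd means that when boxes overlap, a point ends up with the id of the smallest enclosing box, which is processed last; a minimal numpy sketch of that overwrite rule, with hypothetical axis-aligned boxes:

    import numpy as np

    # Two nested axis-aligned boxes, already sorted by volume (descending).
    points = np.array([[0.0, 0.0, 0.0], [0.9, 0.0, 0.0]])
    boxes = [(np.zeros(3), np.array([2.0, 2.0, 2.0]), 1),
             (np.zeros(3), np.array([0.5, 0.5, 0.5]), 2)]
    ids = np.full(len(points), -100, dtype=np.int16)
    for center, size, obj_id in boxes:
        # same normalization as is_inside_box, without rotation
        inside = np.all(np.abs((points - center) / size * 2.0) <= 1.0, axis=1)
        ids[inside] = obj_id
    print(ids)  # [2 1]: the origin lies in both boxes, the smaller box wins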
+ """ + + if os.path.exists(f'{save_root}/{scan_id}.pth'): + return + + try: + if 'scene' in scan_id: + if 'scannet/' + scan_id not in es_anno: + return + + pcd_info = create_scene_pcd( + es_anno['scannet/' + scan_id], + process_scannet(scan_id, scannet_root, scannet_matrix), + ) + + elif 'mp3d' in scan_id: + raw_scan_id, region_id = ( + mp3d_mapping[scan_id.split('_region')[0]], + 'region' + scan_id.split('_region')[1], + ) + mapping_name = f'matterport3d/{raw_scan_id}/{region_id}' + if mapping_name not in es_anno: + return + + pcd_info = create_scene_pcd( + es_anno[mapping_name], + process_mp3d(scan_id, mp3d_root, mp3d_matrix, mp3d_mapping), + ) + + else: + if '3rscan/' + scan_id not in es_anno: + return + pcd_info = create_scene_pcd( + es_anno['3rscan/' + scan_id], + process_3rscan(scan_id, trscan_root, trscan_matrix), + ) + + save_path = f'{save_root}/{scan_id}.pth' + torch.save(pcd_info, save_path) + + except Exception as error: + print(error) + print(f'Error in processing {scan_id}') + + +if __name__ == '__main__': + path_of_version1 = '../mmscan_data/embodiedscan_split/embodiedscan-v1' + parser = ArgumentParser() + parser.add_argument('--meta_path', type=str, default='./meta_data') + parser.add_argument( + '--data_root', + type=str, + default=f'{path_of_version1}/data', + ) + parser.add_argument( + '--save_root', + type=str, + default=f'{path_of_version1}/process_pcd', + ) + parser.add_argument( + '--train_pkl_path', + type=str, + default=f'{path_of_version1}/embodiedscan_infos_train.pkl', + ) + parser.add_argument( + '--val_pkl_path', + type=str, + default=f'{path_of_version1}/embodiedscan_infos_val.pkl', + ) + parser.add_argument('--nproc', type=int, default=8) + args = parser.parse_args() + + os.makedirs(args.save_root, exist_ok=True) + + scannet_root = f'{args.data_root}/scannet' + mp3d_root = f'{args.data_root}/matterport3d' + trscan_root = f'{args.data_root}/3rscan' + + # (0) some necessary info + with open(f'{args.meta_path}/mp3d_mapping.json', 'r') as f: + mapping = json.load(f) + mapping = {v: k for k, v in mapping.items()} + + TYPE2INT = np.load(args.train_pkl_path, + allow_pickle=True)['metainfo']['categories'] + es_anno.update(read_annotation_pickle(args.train_pkl_path)) + es_anno.update(read_annotation_pickle(args.val_pkl_path)) + + # loading the required scan id + with open(f'{args.meta_path}/all_scan.json', 'r') as f: + scan_id_list = json.load(f) + + # (1) loading the axis matrix info + mp3d_matrix = np.load(f'{args.meta_path}/mp3d_matrix.npy', + allow_pickle=True).item() + trscan_matrix = np.load(f'{args.meta_path}/3rscan_matrix.npy', + allow_pickle=True).item() + with open(f'{args.meta_path}/scans_axis_alignment_matrices.json', + 'r') as f: + scan2axis_align = json.load(f) + scannet_matrix = {} + for scan_id in scan2axis_align: + scannet_matrix[scan_id] = np.array(scan2axis_align[scan_id], + dtype=np.float32).reshape(4, 4) + + # (2) Collecting task + tasks = [] + for scan_id in scan_id_list: + tasks.append(( + scan_id, + args.save_root, + scannet_root, + mp3d_root, + trscan_root, + scannet_matrix, + mp3d_matrix, + trscan_matrix, + mapping, + )) + + # (3) processing steps + + parallel = args.nproc > 1 + + if parallel: + mmengine.utils.track_parallel_progress(process_one_scan, tasks, + args.nproc) + else: + for param in tqdm(tasks): + process_one_scan(param) diff --git a/data_preparation/utils/data_utils.py b/data_preparation/utils/data_utils.py new file mode 100644 index 0000000..177521f --- /dev/null +++ b/data_preparation/utils/data_utils.py @@ -0,0 +1,130 @@ 
+import numpy as np +from tqdm import tqdm + + +def read_annotation_pickle(path: str, show_progress: bool = True): + """Read the annotation pickle file and return a dictionary holding the + embodiedscan annotations for all scans in the split. + + Args: + path (str): the path of the annotation pickle file. + show_progress (bool): whether to show a progress bar. + Returns: + dict: A dictionary. + scene_id : (bboxes, object_ids, object_types, visible_dict, + extrinsics_c2w, axis_align_matrix, intrinsics, image_paths) + bboxes: numpy array of bounding boxes, + shape (N, 9): xyz, lwh, ypr + object_ids: numpy array of obj ids, shape (N,) + object_types: list of strings, each string is a type of object + visible_view_object_dict: a dictionary {view_id: + visible_instance_ids} + extrinsics_c2w: a list of 4x4 matrices, each matrix is the + extrinsic matrix of a view + axis_align_matrix: a 4x4 matrix, the axis-alignment matrix + of the scene + intrinsics: a list of 4x4 matrices, each matrix is the + intrinsic matrix of a view + image_paths: a list of strings, each string is the path + of an image in the scene + """ + with open(path, 'rb') as f: + data = np.load(f, allow_pickle=True) + + metainfo = data['metainfo'] + object_type_to_int = metainfo['categories'] + object_int_to_type = {v: k for k, v in object_type_to_int.items()} + datalist = data['data_list'] + output_data = {} + pbar = (tqdm(range(len(datalist))) if show_progress else range( + len(datalist))) + for scene_idx in pbar: + images = datalist[scene_idx]['images'] + + intrinsic = datalist[scene_idx].get('cam2img', None) # a 4x4 matrix + missing_intrinsic = False + if intrinsic is None: + # each view has a different intrinsic for mp3d + missing_intrinsic = True + depth_intrinsic = datalist[scene_idx].get( + 'cam2depth', None) # a 4x4 matrix, for 3rscan + if depth_intrinsic is None and not missing_intrinsic: + depth_intrinsic = datalist[scene_idx][ + 'depth_cam2img'] # a 4x4 matrix, for scannet + axis_align_matrix = datalist[scene_idx][ + 'axis_align_matrix'] # a 4x4 matrix + + scene_id = datalist[scene_idx]['sample_idx'] + + instances = datalist[scene_idx]['instances'] + bboxes = [] + object_ids = [] + object_types = [] + object_type_ints = [] + for object_idx in range(len(instances)): + bbox_3d = instances[object_idx]['bbox_3d'] # list of 9 values + bbox_label_3d = instances[object_idx]['bbox_label_3d'] # int + bbox_id = instances[object_idx]['bbox_id'] # int + object_type = object_int_to_type[bbox_label_3d] + + object_type_ints.append(bbox_label_3d) + object_types.append(object_type) + bboxes.append(bbox_3d) + object_ids.append(bbox_id) + bboxes = np.array(bboxes) + object_ids = np.array(object_ids) + object_type_ints = np.array(object_type_ints) + + visible_view_object_dict = {} + visible_view_object_list = [] + extrinsics_c2w = [] + intrinsics = [] + depth_intrinsics = [] + image_paths = [] + depth_image_paths = [] + + for image_idx in range(len(images)): + img_path = images[image_idx]['img_path'] # str + depth_image = images[image_idx]['depth_path'] + extrinsic_id = img_path.split('/')[-1].split('.')[0] # str + cam2global = images[image_idx]['cam2global'] # a 4x4 matrix + + if missing_intrinsic: + intrinsic = images[image_idx]['cam2img'] + + depth_intrinsic = images[image_idx]['cam2img'] + visible_instance_indices = images[image_idx][ + 'visible_instance_ids'] #
numpy array of int + visible_instance_ids = object_ids[visible_instance_indices] + visible_view_object_dict[extrinsic_id] = visible_instance_ids + visible_view_object_list.append(visible_instance_ids) + extrinsics_c2w.append(cam2global) + intrinsics.append(intrinsic) + depth_intrinsics.append(depth_intrinsic) + image_paths.append(img_path) + depth_image_paths.append(depth_image) + if show_progress: + pbar.set_description(f'Processing scene {scene_id}') + output_data[scene_id] = { + # object level + 'bboxes': bboxes, + 'object_ids': object_ids, + 'object_types': object_types, + 'object_type_ints': object_type_ints, + # image level + 'visible_instance_ids': visible_view_object_list, + 'visible_view_object_dict': visible_view_object_dict, + 'extrinsics_c2w': extrinsics_c2w, + 'axis_align_matrix': axis_align_matrix, + 'intrinsics': intrinsics, + 'depth_intrinsics': depth_intrinsics, + 'image_paths': image_paths, + 'depth_image_paths': depth_image_paths, + } + return output_data diff --git a/data_preparation/utils/mp3d_process.py b/data_preparation/utils/mp3d_process.py new file mode 100644 index 0000000..92caffe --- /dev/null +++ b/data_preparation/utils/mp3d_process.py @@ -0,0 +1,36 @@ +import numpy as np +from plyfile import PlyData + + +def process_mp3d(new_scan_id: str, data_root: str, + axis_align_matrix_dict: dict, mapping: dict): + """Process matterport3d data. + + Args: + new_scan_id (str): processed ID of the matterport3d scan. + data_root (str): Root directory of the matterport3d dataset. + axis_align_matrix_dict (dict): Dict of axis alignment matrices + for each scan. + mapping (dict) : Dict of mapping names. + + Returns: + tuple : point_xyz and point_rgb infos. + """ + axis_align_matrix = axis_align_matrix_dict[new_scan_id] + + scan_id, region_id = ( + new_scan_id.split('_region')[0], + 'region' + new_scan_id.split('_region')[1], + ) + a = PlyData.read( + f'{data_root}/{mapping[scan_id]}/region_segmentations/{region_id}.ply') + v = np.array([list(x) for x in a.elements[0]]) + + pc = np.ascontiguousarray(v[:, :3]) + pts = np.ones((pc.shape[0], 4), dtype=pc.dtype) + pts[:, :3] = pc + pc = np.dot(pts, axis_align_matrix.transpose())[:, :3].astype(np.float32) + colors = np.ascontiguousarray(v[:, -3:]) + colors = colors / 255.0 + colors = colors.astype(np.float32) + return pc, colors, colors[:, 0] diff --git a/data_preparation/utils/pcd_utils.py b/data_preparation/utils/pcd_utils.py new file mode 100644 index 0000000..5a0cbd4 --- /dev/null +++ b/data_preparation/utils/pcd_utils.py @@ -0,0 +1,132 @@ +import os + +import numpy as np +import torch +from plyfile import PlyData + + +def read_mesh_vertices_rgb(filename: str) -> np.ndarray: + """Read XYZ and RGB for each vertex. + + Args: + filename(str): The name of the mesh vertices file. + + Returns: + np.ndarray: Note that RGB values are in 0-255. + """ + assert os.path.isfile(filename) + with open(filename, 'rb') as f: + plydata = PlyData.read(f) + num_verts = plydata['vertex'].count + vertices = np.zeros(shape=[num_verts, 6], dtype=np.float32) + vertices[:, 0] = plydata['vertex'].data['x'] + vertices[:, 1] = plydata['vertex'].data['y'] + vertices[:, 2] = plydata['vertex'].data['z'] + vertices[:, 3] = plydata['vertex'].data['red'] + vertices[:, 4] = plydata['vertex'].data['green'] + vertices[:, 5] = plydata['vertex'].data['blue'] + return vertices + + +def is_inside_box(points: np.ndarray, center: np.ndarray, size: np.ndarray, + rotation_mat: np.ndarray) -> np.ndarray: + """Check if points are inside a 3D bounding box. 
+ + Args: + points(np.ndarray): 3D points, numpy array of shape (n, 3). + center(np.ndarray): center of the box, numpy array of shape (3, ). + size(np.ndarray): size of the box, numpy array of shape (3, ). + rotation_mat(np.ndarray): rotation matrix of the box, + numpy array of shape (3, 3). + + Returns: + np.ndarray: Boolean array of shape (n, ) indicating if each point + is inside the box. + """ + assert points.shape[1] == 3, 'points should be of shape (n, 3)' + center = np.array(center) # (3, ) + size = np.array(size) # (3, ) + rotation_mat = np.array(rotation_mat) + assert rotation_mat.shape == ( + 3, + 3, + ), f'R should be shape (3,3), but got {rotation_mat.shape}' + pcd_local = (points - center) @ rotation_mat # n, 3 + pcd_local = pcd_local / size * 2.0 # scale to [-1, 1] # n, 3 + pcd_local = abs(pcd_local) + return ((pcd_local[:, 0] <= 1) + & (pcd_local[:, 1] <= 1) + & (pcd_local[:, 2] <= 1)) + + +def _axis_angle_rotation(axis: str, angle: np.ndarray) -> np.ndarray: + """Return the rotation matrices for rotations about one of the axes that + the Euler-angle convention describes, for each value of the given angle. + + Args: + axis: Axis label "X", "Y", or "Z". + angle: Euler angles in radians, array of any shape. + + Returns: + Rotation matrices as array of shape (..., 3, 3). + """ + + cos = np.cos(angle) + sin = np.sin(angle) + one = np.ones_like(angle) + zero = np.zeros_like(angle) + + if axis == 'X': + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + elif axis == 'Y': + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + elif axis == 'Z': + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + else: + raise ValueError('letter must be either X, Y or Z.') + + return np.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + + +def euler_angles_to_matrix(euler_angles: np.ndarray, + convention: str) -> np.ndarray: + """Convert rotations given as Euler angles in radians to rotation matrices. + + Args: + euler_angles: Euler angles in radians as array of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", "Z"}. + + Returns: + Rotation matrices as array of shape (..., 3, 3). + """ + if euler_angles.ndim == 0 or euler_angles.shape[-1] != 3: + raise ValueError('Invalid input euler angles.') + if len(convention) != 3: + raise ValueError('Convention must have 3 letters.') + if convention[1] in (convention[0], convention[2]): + raise ValueError(f'Invalid convention {convention}.') + for letter in convention: + if letter not in ('X', 'Y', 'Z'): + raise ValueError(f'Invalid letter {letter} in convention string.') + matrices = [ + _axis_angle_rotation(c, e) + for c, e in zip(convention, np.split(euler_angles, 3, axis=-1)) + ] + matrices = [x.squeeze(axis=-3) for x in matrices] + return np.matmul(np.matmul(matrices[0], matrices[1]), matrices[2]) + + +def euler_to_matrix_np(euler): + """Convert rotations given as Euler angles in radians to rotation matrices.
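As a quick sanity check of the two helpers above (assuming they are imported from data_preparation/utils/pcd_utils.py), a box rotated 90 degrees about Z accepts a point on the world y-axis and rejects one on x:

    import numpy as np

    # Rz(pi/2): the box-local x-axis is mapped onto the world y-axis.
    rot = euler_angles_to_matrix(np.array([np.pi / 2, 0.0, 0.0]), 'ZXY')
    size = np.array([2.0, 0.2, 0.2])   # long along the box-local x-axis
    pts = np.array([[0.0, 0.9, 0.0],   # on world y -> inside
                    [0.9, 0.0, 0.0]])  # on world x -> outside
    print(is_inside_box(pts, np.zeros(3), size, rot))  # [ True False]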
+ + Args: + euler (np.ndarray) : (..., 3) + + Returns: + np.ndarray : (..., 3, 3) + """ + # euler: N*3 np array + euler_tensor = torch.tensor(euler) + matrix_tensor = euler_angles_to_matrix(euler_tensor, 'ZXY') + return np.array(matrix_tensor) diff --git a/data_preparation/utils/scannet_process.py b/data_preparation/utils/scannet_process.py new file mode 100644 index 0000000..cc2fbef --- /dev/null +++ b/data_preparation/utils/scannet_process.py @@ -0,0 +1,37 @@ +import os + +import numpy as np +from plyfile import PlyData + + +def process_scannet(scan_id: str, data_root: str, scannet_matrix: dict): + """Process scannet data. + + Args: + scan_id (str): ID of the scannet scan. + data_root (str): Root directory of the scannet dataset. + scannet_matrix (dict): Dict of axis alignment matrices + for each scan. + + Returns: + tuple : point_xyz and point_rgb infos. + """ + scan_ply_path = os.path.join(f'{data_root}/scans', scan_id, + scan_id + '_vh_clean_2.labels.ply') + data_color = PlyData.read( + os.path.join(f'{data_root}/scans', scan_id, + scan_id + '_vh_clean_2.ply')) + data = PlyData.read(scan_ply_path) + x = np.asarray(data.elements[0].data['x']).astype(np.float32) + y = np.asarray(data.elements[0].data['y']).astype(np.float32) + z = np.asarray(data.elements[0].data['z']).astype(np.float32) + pc = np.stack([x, y, z], axis=1) + r = np.asarray(data_color.elements[0].data['red']) + g = np.asarray(data_color.elements[0].data['green']) + b = np.asarray(data_color.elements[0].data['blue']) + pc_color = (np.stack([r, g, b], axis=1) / 255.0).astype(np.float32) + axis_align_matrix = scannet_matrix[scan_id] + pts = np.ones((pc.shape[0], 4), dtype=pc.dtype) + pts[:, :3] = pc + pc = np.dot(pts, axis_align_matrix.transpose())[:, :3] + return pc, pc_color, pc_color[:, 0] diff --git a/data_preparation/utils/trscan_process.py b/data_preparation/utils/trscan_process.py new file mode 100644 index 0000000..2726151 --- /dev/null +++ b/data_preparation/utils/trscan_process.py @@ -0,0 +1,44 @@ +import os + +import numpy as np +import torch +from pytorch3d.io import load_obj + + +def process_3rscan(scan_id: str, data_root: str, axis_align: dict): + """Process 3rscan data. + + Args: + scan_id (str): ID of the 3rscan scan. + data_root (str): Root directory of the 3rscan dataset. + axis_align (dict): Dict of axis alignment matrices + for each scan. + + Returns: + tuple : point_xyz and point_rgb infos. 
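process_scannet, process_mp3d and process_3rscan all apply their 4x4 axis-alignment matrix the same way, by padding the cloud to homogeneous coordinates; a standalone sketch of that step (align_points is a hypothetical helper name, not part of the codebase):

    import numpy as np

    def align_points(pc: np.ndarray, axis_align: np.ndarray) -> np.ndarray:
        """Apply a 4x4 alignment matrix to an (n, 3) point cloud."""
        pts = np.ones((pc.shape[0], 4), dtype=pc.dtype)
        pts[:, :3] = pc
        return (pts @ axis_align.T)[:, :3]

    # Pure translation as a smoke test: every point shifts by (1, 2, 3).
    shift = np.eye(4)
    shift[:3, 3] = [1.0, 2.0, 3.0]
    print(align_points(np.zeros((2, 3)), shift))  # [[1. 2. 3.] [1. 2. 3.]]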
+ """ + axis_align_matrix = axis_align[scan_id] + + lidar_obj_path = os.path.join(data_root, scan_id, 'mesh.refined.v2.obj') + + points, faces, aux = load_obj(lidar_obj_path) + constant = torch.ones((points.shape[0], 1)) + points_extend = (torch.concat([points, constant], dim=-1)).numpy() + + uvs = aux.verts_uvs + texture_images = aux.texture_images['mesh.refined_0'] + texture_images = torch.flip(texture_images, dims=[0]) + + uvs = uvs.unsqueeze(0) + texture_images = texture_images.unsqueeze(0) + + pc_colors = (torch.nn.functional.grid_sample( + texture_images.permute(0, 3, 1, 2), + (uvs * 2 - 1).unsqueeze(0), + align_corners=False, + ).squeeze(0).squeeze(1).permute(1, 0).numpy()) + + points_trans = np.array( + (points_extend @ axis_align_matrix.transpose())[:, :3]).astype( + np.float32) + return points_trans, pc_colors, pc_colors[:, 0] diff --git a/embodiedscan/datasets/__init__.py b/embodiedscan/datasets/__init__.py deleted file mode 100644 index 79276ac..0000000 --- a/embodiedscan/datasets/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .embodiedscan_dataset import EmbodiedScanDataset -from .mv_3dvg_dataset import MultiView3DGroundingDataset -from .transforms import * # noqa: F401,F403 - -__all__ = ['EmbodiedScanDataset', 'MultiView3DGroundingDataset'] diff --git a/embodiedscan/eval/metrics/__init__.py b/embodiedscan/eval/metrics/__init__.py deleted file mode 100644 index 2d8beb7..0000000 --- a/embodiedscan/eval/metrics/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .det_metric import IndoorDetMetric -from .grounding_metric import GroundingMetric -from .occupancy_metric import OccupancyMetric - -__all__ = ['IndoorDetMetric', 'OccupancyMetric', 'GroundingMetric'] diff --git a/embodiedscan/models/dense_heads/__init__.py b/embodiedscan/models/dense_heads/__init__.py deleted file mode 100644 index a0a9a83..0000000 --- a/embodiedscan/models/dense_heads/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -from .fcaf3d_head import FCAF3DHead, FCAF3DHeadRotMat -from .grounding_head import GroundingHead -from .imvoxel_occ_head import ImVoxelOccHead - -__all__ = ['FCAF3DHead', 'FCAF3DHeadRotMat', 'GroundingHead', 'ImVoxelOccHead'] diff --git a/embodiedscan/models/layers/__init__.py b/embodiedscan/models/layers/__init__.py deleted file mode 100644 index fb1a6d1..0000000 --- a/embodiedscan/models/layers/__init__.py +++ /dev/null @@ -1,3 +0,0 @@ -from .ground_transformer import SparseFeatureFusionTransformerDecoder - -__all__ = ['SparseFeatureFusionTransformerDecoder'] diff --git a/install.py b/install.py index 536e11a..94a8604 100644 --- a/install.py +++ b/install.py @@ -6,10 +6,12 @@ def run_subprocess(command): try: - process = subprocess.Popen(command, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, - universal_newlines=True) + process = subprocess.Popen( + command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True, + ) # Read output and error in real-time for line in process.stdout: @@ -46,31 +48,14 @@ def pytorch3d_links(): pyt_version_str = torch.__version__.split('+')[0].replace('.', '') cuda_version_str = torch.version.cuda.replace('.', '') version_str = ''.join([ - f'py3{sys.version_info.minor}_cu', cuda_version_str, - f'_pyt{pyt_version_str}' + f'py3{sys.version_info.minor}_cu', + cuda_version_str, + f'_pyt{pyt_version_str}', ]) pytorch3d_links = f'https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html' # noqa: E501 return pytorch3d_links -def mmcv_links(): - try: - import torch - except ImportError as e: - print('Pytorch is not 
installed.') - raise e - cuda_version = torch.version.cuda - if cuda_version is None: - print('Pytorch is cpu only.') - raise NotImplementedError - - cuda_version_str = torch.version.cuda.replace('.', '') - pyt_version = torch.__version__.split('+')[0].split('.') - pyt_version_mmcv = pyt_version[0] + '.' + pyt_version[1] - mmcv_links = f'https://download.openmmlab.com/mmcv/dist/cu{cuda_version_str}/torch{pyt_version_mmcv}/index.html' # noqa: E501 - return mmcv_links - - -def install_package(line): pat = '(' + '|'.join(['>=', '==', '>', '<', '<=', '@']) + ')' parts = re.split(pat, line, maxsplit=1) @@ -80,16 +65,6 @@ def install_package(line): links = pytorch3d_links() run_subprocess( [sys.executable, '-m', 'pip', 'install', 'pytorch3d', '-f', links]) - elif package_name == 'mmcv': - links = mmcv_links() - run_subprocess( - [sys.executable, '-m', 'pip', 'install', line, '-f', links]) - elif package_name == 'MinkowskiEngine': - run_subprocess([sys.executable, '-m', 'pip', 'install', 'ninja']) - run_subprocess([ - sys.executable, '-m', 'pip', 'install', '-U', - 'git+https://github.com/NVIDIA/MinkowskiEngine', '--no-deps' - ]) # noqa: E501 else: run_subprocess([sys.executable, '-m', 'pip', 'install', line]) @@ -109,10 +84,10 @@ args = parser.parse_args() install_requires('requirements/base.txt') - if args.mode == 'visual' or args.mode == 'all': - install_requires('requirements/visual.txt') + if args.mode == 'VG' or args.mode == 'all': + install_requires('requirements/VG.txt') - if args.mode == 'run' or args.mode == 'all': - install_requires('requirements/run.txt') + if args.mode == 'QA' or args.mode == 'all': + install_requires('requirements/QA.txt') run_subprocess([sys.executable, '-m', 'pip', 'install', '-e', '.']) diff --git a/mmscan/__init__.py b/mmscan/__init__.py new file mode 100644 index 0000000..f1b4675 --- /dev/null +++ b/mmscan/__init__.py @@ -0,0 +1,15 @@ +# flake8: noqa +from mmscan.mmscan import MMScan + +try: + from mmscan.evaluator.vg_evaluation import VisualGroundingEvaluator +except ImportError: + pass +try: + from mmscan.evaluator.qa_evaluation import QuestionAnsweringEvaluator +except ImportError: + pass +try: + from mmscan.evaluator.gpt_evaluation import GPTEvaluator +except ImportError: + pass diff --git a/mmscan/evaluator/gpt_evaluation.py b/mmscan/evaluator/gpt_evaluation.py new file mode 100644 index 0000000..8033f8b --- /dev/null +++ b/mmscan/evaluator/gpt_evaluation.py @@ -0,0 +1,261 @@ +import json +import random +import threading +from typing import List + +from openai import OpenAI +from tqdm import tqdm + +from mmscan.utils.lang_utils import qa_metric_map, qa_prompt_define + + +class GPTEvaluator: + """GPT-based metric for the QA and Caption tasks. + + Args: + eval_size (int) : The number of samples to evaluate, -1 means + all samples. + Defaults to -1. + api_key (str) : The OpenAI API key. + model (str) : The GPT model to use. + Defaults to "gpt-4o-mini". + show_progress (bool) : Whether to print the evaluation results or not. + Defaults to False.
+ """ + + def __init__(self, + eval_size: int = -1, + api_key: str = '', + model: str = 'gpt-4o-mini', + show_progress: bool = False): + self.eval_size = eval_size + self.model = model + self.show_progress = show_progress + self.client = OpenAI(api_key) + self.qa_metric = [ + 'STa', + 'STs', + 'OOa', + 'OOs', + 'OR', + 'overall', + 'Advanced', + ] + + def normal_query(self, + system_prompt: str, + user_content_groups: List[str], + max_tokens: int = 1000) -> dict: + """Calling the GPT api, return the results in the format of json. + + Args: + system_prompt (str) : + The system prompt inputted into GPT. + user_content_grounps (list[str]) : + The user content inputted into GPT. + max_tokens (int) : Max tokens. Defaults to 1000. + + Returns: + dict : The json-format result. + """ + + messages = [] + if system_prompt: + messages.append({'role': 'system', 'content': system_prompt}) + for content_group in user_content_groups: + messages.append({'role': 'user', 'content': content_group}) + + response = self.client.chat.completions.create( + model=self.model, + messages=messages, + response_format={'type': 'json_object'}, + max_tokens=max_tokens, + ) + response = json.loads(response.choices[0].message.content) + return response + + def qa_evaluation(self, all_samples: dict, thread_index: int, + tmp_path: str) -> None: + """Employ the GPT evaluator. + + Args: + all_samples (dict) : The QA sample dict with QA_ID as keys and + [gt, pred, question] as values. + thread_index (int) : The index of the thread. + tmp_path (str) : The path to store the + tmp-stored json files. + """ + + system_prompt, ex_instance = qa_prompt_define() + + # Define the number of retries + MAXTRY = 3 + gpt_eval_results = {} + + for sample_id in tqdm(all_samples): + GPT_INTPUT = { + 'Question': all_samples[sample_id]['question'], + 'Model Answer': all_samples[sample_id]['pred'], + 'Human Answer': all_samples[sample_id]['gt'][0], + } + + for _ in range(MAXTRY): + FLAG = False + try: + GPT_OUTPUT = self.normal_query(system_prompt + ex_instance, + [str(GPT_INTPUT)]) + # check the result forms + assert ('All key points' in GPT_OUTPUT + and 'Correct Number' in GPT_OUTPUT + and 'Wrong/Missing Number' in GPT_OUTPUT + and 'Reasons' in GPT_OUTPUT) + assert (len(GPT_OUTPUT['All key points']) + == int(GPT_OUTPUT['Correct Number']) + + int(GPT_OUTPUT['Wrong/Missing Number']) + and len(GPT_OUTPUT['All key points']) > 0) + + FLAG = True + except Exception: + + continue + if FLAG: + gpt_eval_results[sample_id] = GPT_OUTPUT + + with open( + tmp_path.replace('.json', + '_thread' + str(thread_index) + '.json'), + 'w', + ) as f: + json.dump(gpt_eval_results, f, indent=4) + + def qa_collection(self, num_threads: int, tmp_path: str) -> dict: + """Collect the gpt-eval results from the tmp-stored json files. + + Args: + num_threads (int) : + The number of threads used to evaluate the samples. + tmp_path (str) : + The path to store the tmp-stored json files. + + Returns: + dict : The evaluation result. 
+ """ + + eval_dict = {metric: [] for metric in self.qa_metric} + static_result = {} + for thread_index in range(num_threads): + with open( + tmp_path.replace('.json', + '_thread' + str(thread_index) + '.json'), + 'r', + ) as f: + thread_result = json.load(f) + for qa_id in thread_result: + static_result[qa_id] = thread_result[qa_id] + for qa_id in static_result: + if len(static_result[qa_id]['All key points']) == 0: + continue + eval_dict[qa_metric_map(qa_id.split('__')[0])].append( + int(static_result[qa_id]['Correct Number']) / + (int(static_result[qa_id]['Correct Number']) + + int(static_result[qa_id]['Wrong/Missing Number']))) + eval_dict['overall'].append( + int(static_result[qa_id]['Correct Number']) / + (int(static_result[qa_id]['Correct Number']) + + int(static_result[qa_id]['Wrong/Missing Number']))) + for metric in eval_dict: + eval_dict[metric] = (sum(eval_dict[metric]) / + len(eval_dict[metric]) + if len(eval_dict[metric]) > 0 else None) + + return eval_dict + + def load_and_eval(self, + raw_batch_input: List[dict], + num_threads: int = 1, + tmp_path: str = './') -> dict: + """Load the batch of results and evaluate. + + Args: + raw_batch_input (list[dict]) : + The batch of results wanted to evaluate + num_threads (int) : The number of the threadings. + Defaults to 1. + tmp_path (str) : The temporay path to store the json files. + + Returns: + dict : The evaluation result. + """ + + # (1) Update the results and store in the dict. + + batch_result = {} + self.__check_format__(raw_batch_input) + for _input in raw_batch_input: + batch_result[_input['ID']] = _input + + # (2) Evaluate the QA task. + if self.eval_size == -1: + num_sample = len(batch_result) + else: + num_sample = self.eval_size + qa_sample = random.sample(list(batch_result.keys()), num_sample) + threads = [] + qa_ids = list(qa_sample) + IDs_divide_index = [] + for _index in range(num_threads): + IDs_divide_index.append( + qa_ids[len(qa_ids) // num_threads * _index:len(qa_ids) // + num_threads * (_index + 1)]) + + for thread_index in range(num_threads): + # Create a sub-dictionary for each thread + partial_samples = { + ID_: batch_result[ID_] + for ID_ in IDs_divide_index[thread_index] + } + if self.show_progress: + print( + f'Thread {thread_index} processing {len(partial_samples)}') + thread = threading.Thread( + target=self.qa_evaluation, + args=(partial_samples, thread_index, + tmp_path + '/gpt_QA.json'), + ) + threads.append(thread) + + for thread in threads: + thread.start() + for thread in threads: + thread.join() + if self.show_progress: + print(f'the results are store under {tmp_path}') + + # (3) Collect the results. + eval_dict = self.qa_collection(num_threads, tmp_path + '/gpt_QA.json') + + return eval_dict + + def __check_format__(self, raw_input): + """Check if the input conform with mmscan evaluation format. The input + to be checked, should be a list of dict. Every item with the keys: + + ["ID","question","pred",""gt"] pred is a list with one one element. gt + is a list with >=1 elements. "ID" should be unique. + + Args: + raw_input (list[dict]) : The input to be checked. + """ + assert isinstance( + raw_input, + list), 'The input of MMScan evaluator should be a list of dict. 
' + + for _index in range(len(raw_input)): + assert 'ID' in raw_input[_index] + assert ('pred' in raw_input[_index] + and isinstance(raw_input[_index]['pred'], list) + and len(raw_input[_index]['pred']) == 1) + assert ('gt' in raw_input[_index] + and isinstance(raw_input[_index]['gt'], list) + and len(raw_input[_index]['gt']) >= 1) + assert 'question' in raw_input[_index] diff --git a/mmscan/evaluator/metrics/box_metric.py b/mmscan/evaluator/metrics/box_metric.py new file mode 100644 index 0000000..b09aaa8 --- /dev/null +++ b/mmscan/evaluator/metrics/box_metric.py @@ -0,0 +1,277 @@ +from typing import Dict, Tuple, Union + +import numpy as np +import torch +from scipy.optimize import linear_sum_assignment + + +def average_precision(recalls: np.ndarray, + precisions: np.ndarray, + mode: str = 'area') -> np.ndarray: + """Calculate average precision (for single or multiple scales). + + Args: + recalls (np.ndarray): Recalls with shape of (num_scales, num_dets) + or (num_dets, ). + precisions (np.ndarray): Precisions with shape of + (num_scales, num_dets) or (num_dets, ). + mode (str): 'area' or '11points', 'area' means calculating the area + under precision-recall curve, '11points' means calculating + the average precision of recalls at [0, 0.1, ..., 1] + Defaults to 'area'. + + Returns: + np.ndarray: Calculated average precision. + """ + if recalls.ndim == 1: + recalls = recalls[np.newaxis, :] + precisions = precisions[np.newaxis, :] + + assert recalls.shape == precisions.shape + assert recalls.ndim == 2 + + num_scales = recalls.shape[0] + ap = np.zeros(num_scales, dtype=np.float32) + + if mode == 'area': + zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) + ones = np.ones((num_scales, 1), dtype=recalls.dtype) + mrec = np.hstack((zeros, recalls, ones)) + mpre = np.hstack((zeros, precisions, zeros)) + for i in range(mpre.shape[1] - 1, 0, -1): + mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) + for i in range(num_scales): + ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] + ap[i] = np.sum( + (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) + + elif mode == '11points': + for i in range(num_scales): + for thr in np.arange(0, 1 + 1e-3, 0.1): + precs = precisions[i, recalls[i, :] >= thr] + prec = precs.max() if precs.size > 0 else 0 + ap[i] += prec + ap /= 11 + else: + raise ValueError( + 'Unrecognized mode, only "area" and "11points" are supported') + return ap + + +def get_f1_scores(iou_matrix: Union[np.ndarray, torch.tensor], + iou_threshold) -> float: + """Refer to the algorithm in Multi3DRefer to compute the F1 score. 
+ + Args: + iou_matrix (ndarray/tensor): + The iou matrix of the predictions and ground truths with + shape (num_preds, num_gts) + iou_threshold (float): 0.25/0.5 + + Returns: + float: the f1 score as the result + """ + iou_thr_tp = 0 + pred_bboxes_count, gt_bboxes_count = iou_matrix.shape + + square_matrix_len = max(gt_bboxes_count, pred_bboxes_count) + iou_matrix_fill = np.zeros(shape=(square_matrix_len, square_matrix_len), + dtype=np.float32) + iou_matrix_fill[:pred_bboxes_count, :gt_bboxes_count] = iou_matrix + + # apply matching algorithm + row_idx, col_idx = linear_sum_assignment(iou_matrix_fill * -1) + + # iterate matched pairs, check ious + for i in range(pred_bboxes_count): + # index the padded matrix; padding columns hold zero iou + iou = iou_matrix_fill[row_idx[i], col_idx[i]] + # calculate true positives + if iou >= iou_threshold: + iou_thr_tp += 1 + + # calculate precision, recall and f1-score for the current scene + f1_score = 2 * iou_thr_tp / (pred_bboxes_count + gt_bboxes_count) + + return f1_score + + +def __get_fp_tp_array__(iou_array: Union[np.ndarray, torch.tensor], + iou_threshold: float) \ + -> Tuple[np.ndarray, np.ndarray]: + """Compute the False-positive and True-positive array for each prediction. + + Args: + iou_array (ndarray/tensor): + the iou matrix of the predictions and ground truths + (shape num_preds, num_gts) + iou_threshold (float): 0.25/0.5 + + Returns: + np.ndarray, np.ndarray: (len(preds)), + the false-positive and true-positive array for each prediction. + """ + gt_matched_records = np.zeros((len(iou_array[0])), dtype=bool) + tp_thr = np.zeros((len(iou_array))) + fp_thr = np.zeros((len(iou_array))) + + for d in range(len(iou_array)): + iou_max = -np.inf + cur_iou = iou_array[d] + num_gts = cur_iou.shape[0] + + if num_gts > 0: + for j in range(num_gts): + iou = cur_iou[j] + if iou > iou_max: + iou_max = iou + jmax = j + + if iou_max >= iou_threshold: + if not gt_matched_records[jmax]: + gt_matched_records[jmax] = True + tp_thr[d] = 1.0 + else: + fp_thr[d] = 1.0 + else: + fp_thr[d] = 1.0 + + return fp_thr, tp_thr + + +def subset_get_average_precision(subset_results: dict, + iou_thr: float)\ + -> Tuple[np.ndarray, np.ndarray]: + """Return the average precision and max recall for a given iou array, + "subset" version where the num_gt of each sample may differ. + + Args: + subset_results (dict): + The results, consisting of scores, sample_indices, ious. + sample_indices means which sample the prediction belongs to. + iou_threshold (float): 0.25/0.5 + + Returns: + Tuple[np.ndarray, np.ndarray]: the average precision and max recall.
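A tiny worked example of the matching-based F1 above, assuming two predictions, one ground truth and a 0.5 threshold:

    import numpy as np
    from scipy.optimize import linear_sum_assignment

    iou = np.array([[0.6], [0.1]])         # 2 preds x 1 gt
    square = np.zeros((2, 2), dtype=np.float32)
    square[:2, :1] = iou                   # pad to a square matrix
    row_idx, col_idx = linear_sum_assignment(-square)
    tp = sum(square[r, c] >= 0.5 for r, c in zip(row_idx, col_idx))
    print(2 * tp / (2 + 1))                # 0.666...: one match clears 0.5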
+ """ + confidences = subset_results['scores'] + sample_indices = subset_results['sample_indices'] + ious = subset_results['ious'] + gt_matched_records = {} + total_gt_boxes = 0 + for i, sample_idx in enumerate(sample_indices): + if sample_idx not in gt_matched_records: + gt_matched_records[sample_idx] = np.zeros((len(ious[i]), ), + dtype=bool) + total_gt_boxes += ious[i].shape[0] + + confidences = np.array(confidences) + sorted_inds = np.argsort(-confidences) + sample_indices = [sample_indices[i] for i in sorted_inds] + ious = [ious[i] for i in sorted_inds] + + tp_thr = np.zeros(len(sample_indices)) + fp_thr = np.zeros(len(sample_indices)) + + for d, sample_idx in enumerate(sample_indices): + iou_max = -np.inf + cur_iou = ious[d] + num_gts = cur_iou.shape[0] + if num_gts > 0: + for j in range(num_gts): + iou = cur_iou[j] + if iou > iou_max: + iou_max = iou + jmax = j + + if iou_max >= iou_thr: + if not gt_matched_records[sample_idx][jmax]: + gt_matched_records[sample_idx][jmax] = True + tp_thr[d] = 1.0 + else: + fp_thr[d] = 1.0 + else: + fp_thr[d] = 1.0 + + fp = np.cumsum(fp_thr) + tp = np.cumsum(tp_thr) + recall = tp / float(total_gt_boxes) + precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + + return average_precision(recall, precision), np.max(recall) + + +def get_average_precision(iou_array: np.ndarray, iou_threshold: float) \ + -> Tuple[np.ndarray, np.ndarray]: + """Return the average precision and max recall for a given iou array. + + Args: + iou_array (ndarray/tensor): + The iou matrix of the predictions and ground truths + (shape len(preds)*len(gts)) + iou_threshold (float): 0.25/0.5 + + Returns: + Tuple[np.ndarray, np.ndarray]: the average precision and max recall. + """ + fp, tp = __get_fp_tp_array__(iou_array, iou_threshold) + fp_cum = np.cumsum(fp) + tp_cum = np.cumsum(tp) + recall = tp_cum / float(iou_array.shape[1]) + precision = tp_cum / np.maximum(tp_cum + fp_cum, np.finfo(np.float64).eps) + + return average_precision(recall, precision), np.max(recall) + + +def get_general_topk_scores(iou_array: Union[np.ndarray, torch.tensor], + iou_threshold: float, + mode: str = 'sigma') -> Dict[str, float]: + """Compute the multi-topk metric, we provide two modes. + + Args: + iou_array (ndarray/tensor): + the iou matrix of the predictions and ground truths + (shape len(preds)*len(gts)) + iou_threshold (float): 0.25/0.5 + mode (str): 'sigma'/'simple' + "simple": 1/N * Hit(min(N*k,len(pred))) + "sigma": 1/N * Sigma [Hit(min(n*k,len(pred)))>=n] n = 1~N + Hit(M) return the number of gtound truths hitted by + the first M predictions. + N = the number of gtound truths + Default to 'sigma'. + + Returns: + Dict[str,float]: the score of multi-topk metric. 
+ """ + + assert mode in ['sigma', 'simple'] + topk_scores = [] + gt_matched_records = np.zeros(len(iou_array[0])) + num_gt = len(gt_matched_records) + for d, _ in enumerate(range(len(iou_array))): + iou_max = -np.inf + cur_iou = iou_array[d] + + for j in range(len(iou_array[d])): + iou = cur_iou[j] + if iou > iou_max: + iou_max = iou + j_max = j + if iou_max >= iou_threshold: + gt_matched_records[j_max] = True + topk_scores.append(gt_matched_records.copy()) + + topk_results = {} + for topk in [1, 3, 5, 10]: + if mode == 'sigma': + scores = [ + int( + np.sum(topk_scores[min(n * topk, len(topk_scores)) - + 1]) >= n) for n in range(1, num_gt + 1) + ] + result = np.sum(scores) / num_gt + else: + query_index = min(num_gt * topk, len(topk_scores)) - 1 + result = np.sum(topk_scores[query_index]) / num_gt + topk_results[f'gTop-{topk}@{iou_threshold}'] = result + return topk_results diff --git a/mmscan/evaluator/metrics/lang_metric.py b/mmscan/evaluator/metrics/lang_metric.py new file mode 100644 index 0000000..341ed55 --- /dev/null +++ b/mmscan/evaluator/metrics/lang_metric.py @@ -0,0 +1,302 @@ +from collections import defaultdict +from typing import List, Tuple + +import torch +from pycocoevalcap.bleu.bleu import Bleu +from pycocoevalcap.cider.cider import Cider +from pycocoevalcap.meteor.meteor import Meteor +from pycocoevalcap.rouge.rouge import Rouge +from pycocoevalcap.spice.spice import Spice +from pycocoevalcap.tokenizer.ptbtokenizer import PTBTokenizer +from scipy.spatial.distance import cosine +from sentence_transformers import SentenceTransformer, util +from transformers import AutoModel, AutoTokenizer + + +def to_coco(kvs, keys): + res = defaultdict(list) + for k in keys: + if k in kvs: + caps = kvs[k] + for c in caps: + res[k].append({'caption': c}) + else: + res[k].append({'caption': ''}) + return res + + +def coco_evaluation(batch_input: List[dict]) -> Tuple[dict, dict]: + """Calculate the extract matching score for each item. + Args: + batch_input(list[dict]): + [{ + "pred": [str], + "gt":[str,...] + },...] + + Returns: + dict, dict: final_scores stores the score of each metric + """ + + prediction = {} + ground_truths = {} + + for _input in batch_input: + prediction[_input['ID']] = _input['pred'] + ground_truths[_input['ID']] = _input['gt'] + + scorers = [ + (Bleu(4), ['Bleu_1', 'Bleu_2', 'Bleu_3', 'Bleu_4']), + (Meteor(), 'METEOR'), + (Rouge(), 'ROUGE_L'), + (Cider(), 'CIDEr'), + (Spice(), 'SPICE'), + ] + + tokenizer = PTBTokenizer() + ref_sent = ground_truths + hypo_sent = prediction + final_scores = {} + final_list = {} + ref_coco = tokenizer.tokenize(to_coco(ref_sent, ref_sent.keys())) + hypo_coco = tokenizer.tokenize(to_coco(hypo_sent, ref_sent.keys())) + + for scorer, method in scorers: + score, scores = scorer.compute_score(ref_coco, hypo_coco) + if type(score) == list: + for m, s, s_ in zip(method, score, scores): + final_scores[m] = s + final_list[m] = s_ + else: + final_scores[method] = score + final_list[method] = scores + + return final_scores, final_list + + +def em_evaluation(batch_input: List[dict]) -> Tuple[list, list]: + """Calculate the extract matching score for each item. + Args: + batch_input(list[dict]): + [{ + "pred": [str], + "gt":[str,...] + },...] 
+ + Returns: + list[float]: (refined) exact matching score for each item + """ + # EM + em_result = [] + for _input in batch_input: + pred = _input['pred'][0] + gts = _input['gt'] + if pred in gts: + em_result.append(1) + else: + em_result.append(0) + + # refined EM + refine_em_result = [] + + for _input in batch_input: + correct = 0 + pred = _input['pred'][0] + gts = _input['gt'] + + if len(pred.split()) == 0: + pred = '@@@@@@@@-= Empty Answer =-@@@@@@@@@' + for gt in gts: + if pred == gt: + correct = 1 + break + elif ''.join(pred.split()) in ''.join(gt.split()): + correct = 1 + break + elif ''.join(gt.split()) in ''.join(pred.split()): + correct = 1 + break + refine_em_result.append(correct) + return em_result, refine_em_result + + +class SimCSEEvaluator: + """A class for calculating the simcse similarity score. Using Sentence + Embeddings to calculate similarity between pred/gt. + + Args: + model_path: path to the simcse pretrained model. + """ + + def __init__(self, model_path: str, eval_bs: int = 500) -> None: + if len(model_path) == 0: + model_path = 'princeton-nlp/sup-simcse-roberta-large' + self.eval_bs = eval_bs + self.simcse_tokenizer = AutoTokenizer.from_pretrained(model_path) + self.simcse_model = AutoModel.from_pretrained(model_path).to('cuda') + + def __batch_evaluation__(self, all_pred: List[str], all_gt: List[str], + gt_count: List[int]) -> List[float]: + """Using Sentence Embeddings to calculate similarity between pred/gt in + a batch. + + Args: + all_pred(list[str]): all prediction + all_gt(list[str]): all ground truth + gt_count(list[int]): + stores number of possible answers to a question + Note: len(all_gt) >= len(all_pred) because + there may be multiple gt answers for a question. + + Return: + list[float]: Simcse similarity of each pred/gts pair. + """ + len_of_pred = len(all_pred) + with torch.no_grad(): + inputs = self.simcse_tokenizer( + all_pred + all_gt, + padding=True, + truncation=True, + return_tensors='pt', + ).to('cuda') + simcse_embeddings = self.simcse_model( + **inputs, output_hidden_states=True, + return_dict=True).pooler_output + + all_pred_simcse_embed = simcse_embeddings[:len_of_pred] + all_gt_simcse_embed = simcse_embeddings[len_of_pred:] + all_simcse_sim = [] + + accumulated = 0 + for i in range(len(all_pred)): + simcse_similarity = -100 + for j in range(accumulated, accumulated + gt_count[i]): + simcse_similarity = max( + simcse_similarity, + 1 - cosine( + all_pred_simcse_embed[i].cpu().detach().numpy(), + all_gt_simcse_embed[j].cpu().detach().numpy(), + ), + ) + + all_simcse_sim.append(simcse_similarity) + accumulated += gt_count[i] + torch.cuda.empty_cache() + return all_simcse_sim + + def evaluation(self, batch_input: List[dict]) -> List[float]: + """Calculate the simcse similarity score for each item. + Args: + batch_input(list[dict]): + [{ + "pred": [str], + "gt":[str,...] + },...] + + Returns: + list[float]: simcse similarity for each item + """ + all_simcse_similarity = [] + batch_lan_pred = [] + batch_lan_gt = [] + count_gt = [] + + for idx, _item in enumerate(batch_input): + batch_lan_pred.extend(_item['pred']) + batch_lan_gt.extend(_item['gt']) + count_gt.append(len(_item['gt'])) + + if (idx + 1) % self.eval_bs == 0 or idx == len(batch_input) - 1: + all_simcse_similarity += self.__batch_evaluation__( + batch_lan_pred, batch_lan_gt, count_gt) + batch_lan_pred = [] + batch_lan_gt = [] + count_gt = [] + + return all_simcse_similarity + + +class SBERTEvaluator: + """A class for calculating the sbert similarity score.
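For instance, with hypothetical strings, strict EM fails while the whitespace-insensitive containment check of the refined EM succeeds:

    # assuming em_evaluation is imported from lang_metric.py
    batch = [{'pred': ['a wooden chair'], 'gt': ['wooden chair', 'chair']}]
    em, refined = em_evaluation(batch)
    print(em, refined)  # [0] [1]: 'woodenchair' is contained in 'awoodenchair'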
Using Sentence-BERT + to calculate similarity between pred/gt. + + Args: + model_path: path to the sbert pretrained model. + """ + + def __init__(self, model_path: str, eval_bs: int = 500) -> None: + if len(model_path) == 0: + model_path = 'all-mpnet-base-v2' + self.eval_bs = eval_bs + self.sbert_model = SentenceTransformer(model_path, device='cuda') + + def __batch_evaluation__(self, all_pred: List[str], all_gt: List[str], + gt_count: List[int]) -> List[float]: + """Using Sentence-BERT to calculate similarity between pred/gt in a + batch. + + Args: + all_pred(list[str]): all prediction + all_gt(list[str]): all ground truth + gt_count(list[int]): stores number of possible + answers to a question + Note: len(all_gt) >= len(all_pred) because there may be + multiple gt answers for a question. + + Return: + list[float]: Sentence-BERT similarity of each pred/gts pair. + """ + len_of_pred = len(all_pred) + with torch.no_grad(): + sbert_embeddings = self.sbert_model.encode(all_pred + all_gt, + show_progress_bar=False, + device='cuda') + + all_pred_sbert_embed = sbert_embeddings[:len_of_pred] + all_gt_sbert_embed = sbert_embeddings[len_of_pred:] + all_sbert_sim = [] + + accumulated = 0 + for i in range(len(all_pred)): + sbert_similarity = -100 + for j in range(accumulated, accumulated + gt_count[i]): + sbert_similarity = max( + sbert_similarity, + util.cos_sim(all_pred_sbert_embed[i], + all_gt_sbert_embed[j])[0][0].item(), + ) + all_sbert_sim.append(sbert_similarity) + accumulated += gt_count[i] + torch.cuda.empty_cache() + return all_sbert_sim + + def evaluation(self, batch_input: List[dict]) -> List[float]: + """Calculate the sbert similarity score for each item. + Args: + batch_input(list[dict]): + [{ + "pred": [str], + "gt":[str,...] + },...] + + Returns: + list[float]: sbert similarity for each item + """ + all_sbert_similarity = [] + batch_lan_pred = [] + batch_lan_gt = [] + count_gt = [] + + for idx, _item in enumerate(batch_input): + batch_lan_pred.extend(_item['pred']) + batch_lan_gt.extend(_item['gt']) + count_gt.append(len(_item['gt'])) + + if (idx + 1) % self.eval_bs == 0 or idx == len(batch_input) - 1: + all_sbert_similarity += self.__batch_evaluation__( + batch_lan_pred, batch_lan_gt, count_gt) + batch_lan_pred = [] + batch_lan_gt = [] + count_gt = [] + + return all_sbert_similarity diff --git a/mmscan/evaluator/qa_evaluation.py b/mmscan/evaluator/qa_evaluation.py new file mode 100644 index 0000000..d67e640 --- /dev/null +++ b/mmscan/evaluator/qa_evaluation.py @@ -0,0 +1,202 @@ +from typing import List + +import torch + +from mmscan.evaluator.metrics.lang_metric import (SBERTEvaluator, + SimCSEEvaluator, + coco_evaluation, + em_evaluation) +from mmscan.utils.lang_utils import special_token_filter + + +class QuestionAnsweringEvaluator: + """Traditional metrics for QA and Caption evaluation, consisting of + implementations of + + [EM, BLEU, METEOR, ROUGE, CIDEr, SPICE, SIMCSE, SBERT]. + SIMCSE and SBERT are special metrics and require a GPU. + + Attributes: + save_buffer(list[dict]): The buffer of inputs. + records(list[dict]): Metric results for each sample. + metric_record(dict): Metric results for each category. + (average of all samples with the same category) + Args: + model_config(dict): The model config for special metric evaluation. + Defaults to {}. + max_length(int): The maximum length of the input. + Defaults to 256. + show_results(bool): Whether to print the evaluation results. + Defaults to True.
+ """ + + def __init__(self, + model_config: dict = {}, + max_length: int = 256, + show_results: bool = True) -> None: + self.eval_bs = 500 + self.show_results = show_results + self.max_length = max_length + self.special_metric = [] + if 'simcse' in model_config and torch.cuda.is_available(): + self.special_metric.append('simcse') + self.simcse_evaluator = SimCSEEvaluator(model_config['simcse'], + eval_bs=self.eval_bs) + if 'sbert' in model_config and torch.cuda.is_available(): + self.special_metric.append('sbert') + self.sbert_evaluator = SBERTEvaluator(model_config['sbert'], + eval_bs=self.eval_bs) + + self.eval_metric = [ + 'EM', + 'refined_EM', + 'Bleu_1', + 'Bleu_2', + 'Bleu_3', + 'Bleu_4', + 'METEOR', + 'ROUGE_L', + 'CIDEr', + 'SPICE', + ] + self.special_metric + + self.reset() + + def reset(self) -> None: + """Reset the evaluator, clear the buffer and records.""" + self.metric_record = {} + self.save_results = {} + self.save_buffer = [] + self.records = [] + + def update(self, batch_input: List[dict]) -> dict: + """Update a batch of results to the buffer, and then filtering and + truncating. each item is expected to be a dict with keys. + + ["index", "ID","question","pred","gt"] + + 1. pred is a list with one one element. + 2. gt is a list with >=1 elements. + 3. "ID" should be unique. + + Args: + batch_input (list[dict]): + Batch of the raw original input. + + Returns: + Dict: {"EM":EM metric for this batch, + "refined_EM":Refined EM metric for this batch} + """ + + self.__check_format__(batch_input) + + for _input in batch_input: + _input['pred'] = [ + special_token_filter( + _input['pred'][0], + clean=True, + truncation=True, + max_length=self.max_length, + ) + ] + _input['gt'] = [ + special_token_filter(i, + clean=True, + truncation=True, + max_length=self.max_length) + for i in _input['gt'] + ] + + self.save_buffer.extend(batch_input) + + EM_, refine_EM_ = em_evaluation(batch_input) + return { + 'EM': sum(EM_) / len(EM_), + 'refined_EM': sum(refine_EM_) / len(refine_EM_), + } + + def start_evaluation(self) -> dict: + """Start the evaluation process. + + Returns: + dict: The results of the evaluation. 
+ """ + + # (1) exact match evaluation + EM_, refine_EM_ = em_evaluation(self.save_buffer) + + # (2) coco metric evaluation + coco_scores, coco_scores_list = coco_evaluation(self.save_buffer) + + # (3) special metric evaluation, forward one time each batch + if 'simcse' in self.special_metric: + all_simcse_similarity = self.simcse_evaluator.evaluation( + self.save_buffer) + if 'sbert' in self.special_metric: + all_sbert_similarity = self.sbert_evaluator.evaluation( + self.save_buffer) + + # (1) store for every sample + store_dict = {'EM': EM_, 'refined_EM': refine_EM_} + + for metric_ in coco_scores: + if metric_ == 'SPICE': + store_dict[metric_] = [ + item['All'] for item in coco_scores_list['SPICE'] + ] + else: + store_dict[metric_] = coco_scores_list[metric_] + if 'simcse' in self.special_metric: + store_dict['simcse'] = all_simcse_similarity + if 'sbert' in self.special_metric: + store_dict['sbert'] = all_sbert_similarity + + for _index, item_dict in enumerate(self.save_buffer): + for metric in store_dict: + item_dict[metric] = store_dict[metric][_index] + + self.records.append(item_dict) + + # (2) return the final mean metric + + eval_dict = {} + for metric in self.eval_metric: + if metric not in coco_scores.keys(): + eval_dict.update({ + metric: + sum(store_dict[metric]) / len(store_dict[metric]) + }) + else: + eval_dict[metric] = coco_scores[metric] + self.metric_record = eval_dict + + if self.show_results: + print(eval_dict) + + return eval_dict + + def __check_format__(self, raw_input: List[dict]) -> None: + """Check if the input conform with mmscan evaluation format. + + Every item with the keys ["index", "ID","question","pred","gt"], + 'pred' is a list with one one element, 'gt' is a list + with >=1 elements. "ID" should be unique. + Args: + raw_input (list[dict]): The input to be checked. + """ + assert isinstance( + raw_input, + list), 'The input of QA evaluator should be a list of dict. ' + + for _index in range(len(raw_input)): + if 'index' not in raw_input[_index]: + raw_input[_index]['index'] = len(self.save_buffer) + _index + + assert 'ID' in raw_input[_index] + assert ('pred' in raw_input[_index] + and isinstance(raw_input[_index]['pred'], list) + and len(raw_input[_index]['pred']) == 1) + assert ('gt' in raw_input[_index] + and isinstance(raw_input[_index]['gt'], list) + and len(raw_input[_index]['gt']) >= 1) + assert 'question' in raw_input[_index] diff --git a/mmscan/evaluator/vg_evaluation.py b/mmscan/evaluator/vg_evaluation.py new file mode 100644 index 0000000..958d9e9 --- /dev/null +++ b/mmscan/evaluator/vg_evaluation.py @@ -0,0 +1,334 @@ +from typing import List, Tuple + +import numpy as np +import torch +from terminaltables import AsciiTable +from tqdm import tqdm + +from mmscan.evaluator.metrics.box_metric import (get_average_precision, + get_general_topk_scores, + subset_get_average_precision) +from mmscan.utils.box_utils import index_box, to_9dof_box + + +class VisualGroundingEvaluator: + """Evaluator for MMScan Visual Grounding benchmark. The evaluation metric + includes "AP","AP_C","AR","gTop-k". + + Attributes: + save_buffer(list[dict]): Save the buffer of Inputs. + + records(list[dict]): Metric results for each sample + + category_records(dict): Metric results for each category + (average of all samples with the same category) + Args: + show_results(bool): Whether to print the evaluation results. + Defaults to True. 
+ """ + + def __init__(self, show_results: bool = True) -> None: + + self.show_results = show_results + self.eval_metric_type = ['AP', 'AR'] + self.top_k_visible = [1, 3, 5] + self.call_for_category_mode = True + + for top_k in [1, 3, 5, 10]: + self.eval_metric_type.append(f'gTop-{top_k}') + + self.iou_thresholds = [0.25, 0.50] + self.eval_metric = [] + for iou_thr in self.iou_thresholds: + for eval_type in self.eval_metric_type: + self.eval_metric.append(eval_type + '@' + str(iou_thr)) + + self.reset() + + def reset(self) -> None: + """Reset the evaluator, clear the buffer and records.""" + self.save_buffer = [] + self.records = [] + self.category_records = {} + + def update(self, raw_batch_input: List[dict]) -> None: + """Update a batch of results to the buffer. + + Args: + raw_batch_input (list[dict]): + Batch of the raw original input. + """ + self.__check_format__(raw_batch_input) + self.save_buffer.extend(raw_batch_input) + + def start_evaluation(self) -> dict: + """This function is used to start the evaluation process. + + It will iterate over the saved buffer and evaluate each item. + Returns: + category_records(dict): Metric results per category. + """ + + category_collect = {} + + for data_item in tqdm(self.save_buffer): + metric_for_single = {} + + # (1) len(gt)==0 : skip + if self.__is_zero__(data_item['gt_bboxes']): + continue + + # (2) len(pred)==0 : model's wrong + if self.__is_zero__(data_item['pred_bboxes']): + for iou_thr in self.iou_thresholds: + metric_for_single[f'AP@{iou_thr}'] = 0 + for topk in [1, 3, 5, 10]: + metric_for_single[f'gTop-{topk}@{iou_thr}'] = 0 + + data_item['num_gts'] = len(data_item['gt_bboxes']) + data_item.update(metric_for_single) + self.records.append(data_item) + continue + + iou_array, pred_score = self.__calculate_iou_array_(data_item) + if self.call_for_category_mode: + category = self.__category_mapping__(data_item['subclass']) + if category not in category_collect.keys(): + category_collect[category] = { + 'ious': [], + 'scores': [], + 'sample_indices': [], + 'cnt': 0, + } + + category_collect[category]['ious'].extend(iou_array) + category_collect[category]['scores'].extend(pred_score) + category_collect[category]['sample_indices'].extend( + [data_item['index']] * len(iou_array)) + category_collect[category]['cnt'] += 1 + + for iou_thr in self.iou_thresholds: + # AP/AR metric + AP, AR = get_average_precision(iou_array, iou_thr) + metric_for_single[f'AP@{iou_thr}'] = AP + metric_for_single[f'AR@{iou_thr}'] = AR + + # topk metric + metric_for_single.update( + get_general_topk_scores(iou_array, iou_thr)) + + data_item['num_gts'] = iou_array.shape[1] + data_item.update(metric_for_single) + self.records.append(data_item) + + self.collect_result() + + if self.call_for_category_mode: + for iou_thr in self.iou_thresholds: + self.category_records['overall'][f'AP_C@{iou_thr}'] = 0 + self.category_records['overall'][f'AR_C@{iou_thr}'] = 0 + + for category in category_collect: + AP_C, AR_C = subset_get_average_precision( + category_collect[category], iou_thr) + self.category_records[category][f'AP_C@{iou_thr}'] = AP_C + self.category_records[category][f'AR_C@{iou_thr}'] = AR_C + self.category_records['overall'][f'AP_C@{iou_thr}'] += ( + AP_C * category_collect[category]['cnt'] / + len(self.records)) + self.category_records['overall'][f'AR_C@{iou_thr}'] += ( + AR_C * category_collect[category]['cnt'] / + len(self.records)) + + return self.category_records + + def collect_result(self) -> dict: + """Collect the result from the evaluation process. 
+
+ def collect_result(self) -> dict:
+ """Collect the results from the evaluation process.
+
+ Stores them based on their subclass.
+ Returns:
+ category_results(dict): Average results per category.
+ """
+ category_results = {}
+ category_results['overall'] = {}
+
+ for metric_name in self.eval_metric:
+ category_results['overall'][metric_name] = []
+ category_results['overall']['num_gts'] = 0
+
+ for data_item in self.records:
+ category = self.__category_mapping__(data_item['subclass'])
+
+ if category not in category_results:
+ category_results[category] = {}
+ for metric_name in self.eval_metric:
+ category_results[category][metric_name] = []
+ category_results[category]['num_gts'] = 0
+
+ for metric_name in self.eval_metric:
+ category_results[category][metric_name].append(
+ data_item[metric_name])
+ category_results['overall'][metric_name].append(
+ data_item[metric_name])
+
+ category_results['overall']['num_gts'] += data_item['num_gts']
+ category_results[category]['num_gts'] += data_item['num_gts']
+ for category in category_results:
+ for metric_name in self.eval_metric:
+ category_results[category][metric_name] = np.mean(
+ category_results[category][metric_name])
+
+ self.category_records = category_results
+
+ return category_results
+
+ def print_result(self) -> str:
+ """Show the result table.
+
+ Returns:
+ table(str): The metric result table.
+ """
+ assert len(self.category_records) > 0, 'No result yet.'
+ self.category_records = {
+ key: self.category_records[key]
+ for key in sorted(self.category_records.keys(), reverse=True)
+ }
+
+ header = ['Type']
+ header.extend(self.category_records.keys())
+ table_columns = [[] for _ in range(len(header))]
+
+ # metric rows for each IoU threshold
+ for iou_thr in self.iou_thresholds:
+ show_in_table = (['AP', 'AR'] +
+ [f'gTop-{k}' for k in self.top_k_visible]
+ if not self.call_for_category_mode else
+ ['AP', 'AR', 'AP_C', 'AR_C'] +
+ [f'gTop-{k}' for k in self.top_k_visible])
+
+ for metric_type in show_in_table:
+ table_columns[0].append(metric_type + ' ' + str(iou_thr))
+
+ for i, category in enumerate(self.category_records.keys()):
+ ap = self.category_records[category][f'AP@{iou_thr}']
+ ar = self.category_records[category][f'AR@{iou_thr}']
+ table_columns[i + 1].append(f'{float(ap):.4f}')
+ table_columns[i + 1].append(f'{float(ar):.4f}')
+
+ ap = self.category_records[category][f'AP_C@{iou_thr}']
+ ar = self.category_records[category][f'AR_C@{iou_thr}']
+ table_columns[i + 1].append(f'{float(ap):.4f}')
+ table_columns[i + 1].append(f'{float(ar):.4f}')
+ for k in self.top_k_visible:
+ top_k = self.category_records[category][
+ f'gTop-{k}@{iou_thr}']
+ table_columns[i + 1].append(f'{float(top_k):.4f}')
+
+ # Number of gts
+ table_columns[0].append('Num GT')
+ for i, category in enumerate(self.category_records.keys()):
+ table_columns[i + 1].append(
+ f'{int(self.category_records[category]["num_gts"])}')
+
+ table_data = [header]
+ table_rows = list(zip(*table_columns))
+ table_data += table_rows
+ table_data = [list(row) for row in zip(*table_data)]
+ table = AsciiTable(table_data)
+ table.inner_footing_row_border = True
+
+ if self.show_results:
+ print(table.table)
+
+ return table.table
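+
+ # ------------------------------------------------------------------
+ # Examples of the mapping below (subclass names are illustrative):
+ # names are lower-cased and abbreviated, and every single-object
+ # attribute/eq subclass collapses into the one 'vg_sngl_attr'
+ # category, e.g.
+ #   'VG_Single_Attr_Unique' -> 'vg_sngl_attr'
+ #   'VG_Inter_OR_Common'    -> 'vg_int_or_cmn'
+ # ------------------------------------------------------------------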
+ """ + sub_class = sub_class.lower() + sub_class = sub_class.replace('single', 'sngl') + sub_class = sub_class.replace('inter', 'int') + sub_class = sub_class.replace('unique', 'uniq') + sub_class = sub_class.replace('common', 'cmn') + sub_class = sub_class.replace('attribute', 'attr') + if 'sngl' in sub_class and ('attr' in sub_class or 'eq' in sub_class): + sub_class = 'vg_sngl_attr' + return sub_class + + def __calculate_iou_array_( + self, data_item: dict) -> Tuple[np.ndarray, np.ndarray]: + """Calculate some information needed for eavl. + + Args: + data_item (dict): The subclass name in the original samples. + + Returns: + np.ndarray, np.ndarray : + The iou array sorted by the confidence and the + confidence scores. + """ + + pred_bboxes = data_item['pred_bboxes'] + gt_bboxes = data_item['gt_bboxes'] + # Sort the bounding boxes based on their scores + pred_scores = data_item['pred_scores'] + top_idxs = torch.argsort(pred_scores, descending=True) + pred_scores = pred_scores[top_idxs] + + pred_bboxes = to_9dof_box(index_box(pred_bboxes, top_idxs)) + gt_bboxes = to_9dof_box(gt_bboxes) + + iou_matrix = pred_bboxes.overlaps(pred_bboxes, + gt_bboxes) # (num_query, num_gt) + # (3) calculate the TP and NP, + # preparing for the forward AP/topk calculation + pred_scores = pred_scores.cpu().numpy() + iou_array = iou_matrix.cpu().numpy() + + return iou_array, pred_scores + + def __is_zero__(self, box): + if isinstance(box, (list, tuple)): + return (len(box[0]) == 0) + return (len(box) == 0) + + def __check_format__(self, raw_input: List[dict]) -> None: + """Check if the input conform with mmscan evaluation format. Transform + the input box format. + + Args: + raw_input (list[dict]): The input of VG evaluator. + """ + assert isinstance( + raw_input, + list), 'The input of VG evaluator should be a list of dict. ' + raw_input = raw_input + + for _index in tqdm(range(len(raw_input))): + if 'index' not in raw_input[_index]: + raw_input[_index]['index'] = len(self.save_buffer) + _index + + if 'subclass' not in raw_input[_index]: + raw_input[_index]['subclass'] = 'non-class' + + assert 'gt_bboxes' in raw_input[_index] + assert 'pred_bboxes' in raw_input[_index] + assert 'pred_scores' in raw_input[_index] + + for mode in ['pred_bboxes', 'gt_bboxes']: + if (isinstance(raw_input[_index][mode], dict) + and 'center' in raw_input[_index][mode]): + raw_input[_index][mode] = [ + torch.tensor(raw_input[_index][mode]['center']), + torch.tensor(raw_input[_index][mode]['size']).to( + torch.float32), + torch.tensor(raw_input[_index][mode]['rot']).to( + torch.float32) + ] diff --git a/mmscan/mmscan.py b/mmscan/mmscan.py new file mode 100644 index 0000000..85a6005 --- /dev/null +++ b/mmscan/mmscan.py @@ -0,0 +1,521 @@ +import os +import os.path as osp +import sys +import time +from copy import deepcopy +from typing import List, Union + +import numpy as np +import torch +from torch.utils.data import Dataset + +from mmscan.utils.box_utils import from_9dof_to_6dof +from mmscan.utils.data_io import id_mapping, load_json, read_annotation_pickle +from mmscan.utils.task_utils import anno_token_flatten + +PYTHON_VERSION = sys.version_info[0] + +if not PYTHON_VERSION == 3: + raise ValueError('MMScan dev-kit only supports Python version 3.') + +ENV_PATH = os.path.abspath(__file__) + + +class MMScan(Dataset): + """MMScan Dataset. + + This class serves as the API for experiments on the EmbodiedScan Dataset. + + Args: + version (str): The version of the database, now only support v1. + Defaults to 'v1'. 
+ split (str): The split of the database, now only supports train/val.
+ Defaults to 'train'.
+ dataroot (str): The root directory path of the database.
+ Defaults to the path of the mmscan_data dir.
+ task (str): The language task of the database, now only supports
+ MMScan-QA/MMScan-VG.
+ ratio (float): The ratio of the data to be used.
+ Defaults to 1.0.
+ verbose (bool): Whether to print the information or not.
+ Defaults to False.
+ check_mode (bool): Whether to debug or not.
+ Defaults to False.
+ token_flatten (bool): It only works in MMScan VG tasks, whether to
+ flatten the tokens or not. Defaults to True.
+ """
+
+ def __init__(
+ self,
+ version: str = 'v1',
+ split: str = 'train',
+ dataroot: str = '',
+ task: str = None,
+ ratio: float = 1.0,
+ verbose: bool = False,
+ check_mode: bool = False,
+ token_flatten: bool = True,
+ ) -> None:
+ """Initialize the database and prepare the EmbodiedScan annotation."""
+ super(MMScan, self).__init__()
+ self.version = version
+ if len(dataroot) > 0:
+ self.dataroot = dataroot
+ else:
+ self.dataroot = os.path.join(
+ os.path.dirname(os.path.dirname(ENV_PATH)), 'mmscan_data')
+ self.verbose = verbose
+
+ # we map the test split to val here because we do not provide
+ # ground truth for the test split.
+ if split == 'test':
+ split = 'val'
+ self.split = split
+ self.check_mode = check_mode
+ if self.check_mode:
+ print("MMScan's check mode is enabled!")
+ self.pkl_name = f'{self.dataroot}/embodiedscan_split' +\
+ f'/embodiedscan-{self.version}' +\
+ f'/embodiedscan_infos_{split}.pkl'
+ self.data_path = '{}/embodiedscan_split/data'.format(self.dataroot)
+ self.lang_anno_path = '{}/MMScan-beta-release'.format(self.dataroot)
+
+ self.pcd_path = '{}/embodiedscan_split/process_pcd'.format(
+ self.dataroot)
+
+ self.mapping_json_path = (
+ '{}/../data_preparation/meta_data/mp3d_mapping.json'.format(
+ self.dataroot))
+ self.id_mapping = id_mapping(self.mapping_json_path)
+ self.table_names = [
+ 'es_file',
+ 'pc_file',
+ 'point_clouds',
+ 'bboxes',
+ 'object_ids',
+ 'object_types',
+ 'object_type_ints',
+ 'visible_view_object_dict',
+ 'extrinsics_c2w',
+ 'axis_align_matrix',
+ 'intrinsics',
+ 'depth_intrinsics',
+ 'image_paths',
+ 'depth_image_paths',
+ 'visible_instance_ids',
+ ]
+ self.lang_tasks = ['MMScan-QA', 'MMScan-VG', 'MMScan-DC']
+ self.task_anno_mapping = {
+ 'MMScan-QA': 'MMScan_QA.json',
+ 'MMScan-VG': 'MMScan_VG.json',
+ 'MMScan-DC': None,
+ }
+
+ # Part1: prepare the EmbodiedScan annotation.
+ assert osp.exists(self.pkl_name), 'Database not found: {}'.format(
+ self.pkl_name)
+ if verbose:
+ start = time.time()
+ self.embodiedscan_anno = self.__load_base_anno__(self.pkl_name)
+ if verbose:
+ print('\nLoading embodiedscan-{} for split {}, using {} seconds'.
+ format(self.version, self.split,
+ time.time() - start))
+
+ # Part2: prepare the MMScan annotation.
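+ # Expected layout under `self.dataroot`, inferred from the path
+ # templates above and below (the exact tree is an assumption, not
+ # spelled out in this patch):
+ #
+ #   mmscan_data/
+ #     embodiedscan_split/
+ #       embodiedscan-v1/embodiedscan_infos_{split}.pkl
+ #       data/
+ #       process_pcd/
+ #     MMScan-beta-release/
+ #       Data_splits/{split}-split.json
+ #       MMScan_samples/MMScan_QA.json, MMScan_VG.json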
+ self.task = task + assert (self.task_anno_mapping.get(task, None) + is not None), 'Task {} is not supported yet'.format(task) + if verbose: + start = time.time() + self.mmscan_scan_id = load_json( + f'{self.lang_anno_path}/Data_splits/{self.split}-split.json') + self.mmscan_anno = load_json(f'{self.lang_anno_path}/MMScan_samples/' + + f'{self.task_anno_mapping[task]}')[ + self.split] + if ratio < 1.0: + self.mmscan_anno = self.__downsample_annos__( + self.mmscan_anno, ratio) + if self.check_mode: + self.mmscan_anno = self.mmscan_anno[:100] + if self.task == 'MMScan-VG' and token_flatten: + self.mmscan_anno = anno_token_flatten(self.mmscan_anno) + + self.mmscan_anno = self.__filter_lang_anno__(self.mmscan_anno) + self.data_collect() + if verbose: + end = time.time() + print( + '\nLoading {} split for the {} task, using {} seconds.'.format( + self.split, self.task, end - start)) + + def __getitem__(self, index_: int) -> dict: + """Return the sample item corresponding to the index. The item + contains: + + 1. Scan-level data: + - "ori_pcds" (tuple[Tensor]): The raw data, containing: + - pcd coordinates + - pcd colors + - pcd class labels + - pcd instance labels + - "pcds" (np.ndarray): The point cloud data of the scan, + shape [n_points, 6(xyz+rgb)]. + - "instance_labels" (np.ndarray): The object ID of each point, + shape [n_points, 1]. + - "class_labels" (np.ndarray): The class type of each point, + shape [n_points, 1]. + - "bboxes" (dict): Bounding boxes information, structured as: + { object_id: + { + "type": object_type (str), + "bbox": 9 DoF box (np.ndarray) + }, + ... + } + - "images" (list[dict]): A list of camera information, + each dictionary containing: + - 'img_path' (str): Path to the RGB image. + - 'depth_img_path' (str): Path to the depth image. + - 'intrinsic' (np.ndarray): + Camera intrinsic matrix of the RGB image. + - 'depth_intrinsic' (np.ndarray): + Camera intrinsic matrix of the depth image. + - 'extrinsic' (np.ndarray): Camera extrinsic matrix. + - 'visible_instance_id' (list): + IDs of objects visible in the image. + + 2. Anno-level data + - "sub_class": Sample category. + - "ID": Unique sample ID. + - "scan_id": Corresponding scan ID. + + VG Task: + - "target_id" (list[int]): IDs of target objects. + - "text" (str): Grounding text. + - "target" (list[str]): Types of target objects. + - "anchors" (list[str]): Types of anchor objects. + - "anchor_ids" (list[int]): IDs of anchor objects. + - "tokens_positive" + if not flatten: + (dict): + Position indices of mentioned objects in the text. + else: + (list): Position indices of targets in order. + + QA Task: + - "question" (str): The question text. + - "answers" (list[str]): List of possible answers. + - "object_ids" (list[int]): + Object IDs referenced in the question. + - "object_names" (list[str]): + Types of referenced objects. + - "input_bboxes_id" (list[int]): + IDs of input bounding boxes. + - "input_bboxes" (list[np.ndarray]): + Input bounding boxes, 9 DoF. + + Args: + index_ (int): The index. + + Returns: + dict: The sample item corresponding to the index. + """ + assert self.task is not None, 'Please set the task first!' 
+
+ # (1) store the "index" info
+ data_dict = {}
+ data_dict['index'] = index_
+
+ # (2) loading the data
+ scan_idx = self.mmscan_collect['anno'][index_]['scan_id']
+ pcd_info = self.__process_pcd_info__(scan_idx)
+ images_info = self.__process_img_info__(scan_idx)
+ box_info = self.__process_box_info__(scan_idx)
+
+ data_dict['ori_pcds'] = pcd_info['ori_pcds']
+ data_dict['pcds'] = pcd_info['pcds']
+ data_dict['obj_pcds'] = pcd_info['obj_pcds']
+ data_dict['instance_labels'] = pcd_info['instance_labels']
+ data_dict['class_labels'] = pcd_info['class_labels']
+ data_dict['bboxes'] = box_info
+ data_dict['images'] = images_info
+
+ # (3) loading the data from the collection
+ # deepcopy so that downstream edits do not mutate the cached
+ # annotation
+ data_dict.update(deepcopy(self.mmscan_collect['anno'][index_]))
+
+ return data_dict
+
+ def __len__(self) -> int:
+ assert self.task is not None, 'Please set the task first!'
+ return len(self.mmscan_collect['anno'])
+
+ @property
+ def show_possess(self) -> List[str]:
+ """
+ Returns:
+ list[str]: All data types available in the EmbodiedScan
+ database.
+ """
+ return self.table_names
+
+ @property
+ def show_mmscan_id(self) -> List[str]:
+ """
+ Returns:
+ list[str]: All scan IDs in the current split.
+ """
+ assert self.task is not None, 'Please set the task first!'
+ return self.mmscan_scan_id
+
+ @property
+ def samples(self) -> List[dict]:
+ """
+ Returns:
+ list[dict]: All samples in the MMScan language task.
+ """
+ assert self.task is not None, 'Please set the task first!'
+ return self.mmscan_collect['anno']
+
+ def get_possess(self, table_name: str, scan_idx: str):
+ """Get the data of a given type for the scan from the EmbodiedScan
+ annotation.
+
+ Args:
+ table_name (str): The type of the expected data.
+ scan_idx (str): The scan id to get the data.
+
+ Returns:
+ The data corresponding to the table_name and scan_idx.
+ """
+ assert table_name in self.table_names, 'Table {} not found'.format(
+ table_name)
+
+ if table_name == 'point_clouds':
+ return torch.load(
+ f'{self.pcd_path}/{self.id_mapping.forward(scan_idx)}.pth')
+ elif table_name == 'es_file':
+ return deepcopy(self.pkl_name)
+ elif table_name == 'pc_file':
+ return f'{self.pcd_path}/{self.id_mapping.forward(scan_idx)}.pth'
+ else:
+ return self.embodiedscan_anno[scan_idx][table_name]
+
+ def data_collect(self):
+ """Collect MMScan Samples.
+
+ Store the collected samples in `self.mmscan_collect`. For MMScan QA
+ training samples, each answer is flattened into its own sample.
+ """
+
+ assert self.task is not None, 'Please set the task first!'
+ self.mmscan_collect = {}
+
+ # MMScan anno processing
+ if self.task == 'MMScan-QA':
+ self.mmscan_collect['anno'] = []
+
+ for sample in self.mmscan_anno:
+ if self.split == 'train':
+ for answer in sample['answers']:
+ sub_sample = deepcopy(sample)
+ sub_sample['answers'] = [answer]
+ self.mmscan_collect['anno'].append(sub_sample)
+ else:
+ self.mmscan_collect['anno'].append(sample)
+
+ elif self.task == 'MMScan-VG':
+ self.mmscan_collect['anno'] = self.mmscan_anno
+ else:
+ raise NotImplementedError
+
+ def __filter_lang_anno__(self, samples: List[dict]) -> List[dict]:
+ """Filter out invalid annotations.
+
+ Args:
+ samples (list[dict]): The samples.
+
+ Returns:
+ list[dict] : The filtered results.
+ """ + + if self.task != 'MMScan-VG': + return samples + + filtered_samples = [] + for sample in samples: + if self.__check_lang_anno__(sample): + filtered_samples.append(sample) + return filtered_samples + + def __check_lang_anno__(self, sample: dict) -> bool: + """Check if the item of the annotation is valid or not. + + Args: + sample (dict): The item from the samples. + + Returns: + bool : Whether the item is valid or not. + """ + # fix little typo + anno_obj_ids = self.embodiedscan_anno[sample['scan_id']]['object_ids'] + if self.task == 'MMScan-VG': + if (len(sample['target']) != len(sample['target_id']) + or len(sample['target_id']) == 0): + return False + object_ids = (sample['target_id'] + sample['anchor_ids'] + + sample['distractor_ids']) + for object_id in object_ids: + if (object_id not in anno_obj_ids): + return False + if self.task == 'MMScan-QA': + for object_id in sample['object_ids']: + if (object_id not in anno_obj_ids): + return False + + return True + + def __load_base_anno__(self, pkl_path: str) -> dict: + """Load the embodiedscan pkl file, it will return the embodiedscan + annotations of all scans in the corresponding split. + + Args: + pkl_path (str): The path of the pkl. + + Returns: + dict : The embodiedscan annotations of scans. + (with scan_idx as keys) + """ + return read_annotation_pickle(pkl_path, show_progress=self.verbose) + + def __process_pcd_info__(self, scan_idx: str) -> dict: + """Retrieve the corresponding scan information based on the input scan + ID, including original data, point clouds, object pointclouds, instance + labels and the center of the scan. + + Args: + scan_idx (str): ID of the scan. + + Returns: + dict : The corresponding scan information. + """ + + assert (scan_idx in self.embodiedscan_anno.keys() + ), 'Scan {} is not in {} split'.format(scan_idx, self.split) + + scan_info = {} + pcd_data = torch.load( + f'{self.pcd_path}/{self.id_mapping.forward(scan_idx)}.pth') + points, colors, class_labels, instance_labels = pcd_data + + pcds = np.concatenate([points, colors], 1) + scan_info['ori_pcds'] = deepcopy(pcd_data) + scan_info['pcds'] = deepcopy(pcds) + + obj_pcds = {} + for i in range(instance_labels.max() + 1): + mask = instance_labels == i + if len(pcds[mask]) > 0: + obj_pcds.update({i: pcds[mask]}) + + scan_info['obj_pcds'] = obj_pcds + scan_info['scene_center'] = (points.max(0) + points.min(0)) / 2 + scan_info['instance_labels'] = np.array(instance_labels) + scan_info['class_labels'] = np.array(class_labels) + return scan_info + + def __process_box_info__(self, scan_idx: str) -> dict: + """Retrieve the corresponding bounding boxes information based on the + input scan ID. For each object, this function will return its ID, type, + bounding boxes in format of [ID: {"bbox":bbox, "type":type},...]. + + Args: + scan_idx (str): ID of the scan. + + Returns: + dict : The corresponding bounding boxes information. + """ + assert (scan_idx in self.embodiedscan_anno.keys() + ), 'Scan {} is not in {} split'.format(scan_idx, self.split) + + bboxes = deepcopy(self.get_possess('bboxes', scan_idx)) + object_ids = deepcopy(self.get_possess('object_ids', scan_idx)) + object_types = deepcopy(self.get_possess('object_types', scan_idx)) + return { + object_ids[i]: { + 'bbox': bboxes[i], + 'type': object_types[i] + } + for i in range(len(object_ids)) + } + + def __process_img_info__(self, scan_idx: str) -> List[dict]: + """Retrieve the corresponding camera information based on the input + scan ID. 
For each camera, this function will return its intrinsics,
+ extrinsics, image paths (both rgb & depth) and the visible object ids.
+
+ Args:
+ scan_idx (str): ID of the scan.
+
+ Returns:
+ list[dict]: The corresponding camera information for each
+ camera.
+ """
+ assert (scan_idx in self.embodiedscan_anno.keys()
+ ), 'Scan {} is not in {} split'.format(scan_idx, self.split)
+
+ img_info = dict()
+ img_info['img_path'] = deepcopy(
+ self.get_possess('image_paths', scan_idx))
+ img_info['depth_img_path'] = deepcopy(
+ self.get_possess('depth_image_paths', scan_idx))
+ img_info['intrinsic'] = deepcopy(
+ self.get_possess('intrinsics', scan_idx))
+ img_info['depth_intrinsic'] = deepcopy(
+ self.get_possess('depth_intrinsics', scan_idx))
+ img_info['extrinsic'] = deepcopy(
+ self.get_possess('extrinsics_c2w', scan_idx))
+ img_info['visible_instance_id'] = deepcopy(
+ self.get_possess('visible_instance_ids', scan_idx))
+
+ img_info_list = []
+ for camera_index in range(len(img_info['img_path'])):
+ item = {}
+ for possess in img_info.keys():
+ item[possess] = img_info[possess][camera_index]
+ img_info_list.append(item)
+ return img_info_list
+
+ def down_9dof_to_6dof(
+ self, pcd: Union[np.ndarray, torch.Tensor],
+ box_9dof: Union[np.ndarray, torch.Tensor]) -> np.ndarray:
+ """Transform a 9 DoF bounding box into a 6 DoF one by finding the
+ minimum axis-aligned box covering the points inside it.
+
+ Args:
+ pcd (np.ndarray / Tensor):
+ the point clouds
+ box_9dof (np.ndarray / Tensor):
+ the 9 DoF bounding box
+
+ Returns:
+ np.ndarray :
+ The transformed 6 DoF bounding box.
+ """
+
+ return from_9dof_to_6dof(pcd, box_9dof)
+
+ def __downsample_annos__(self, annos: List[dict],
+ ratio: float) -> List[dict]:
+ """Downsample the annotations by a given ratio.
+
+ Args:
+ annos (list[dict]): The original annotations.
+ ratio (float): The ratio to downsample.
+
+ Returns:
+ list[dict] : The downsampled annotations.
+ """
+ d_annos = []
+ for index in range(len(annos)):
+ if index % int(1 / ratio) == 0:
+ d_annos.append(annos[index])
+ return d_annos
diff --git a/mmscan/utils/box_utils.py b/mmscan/utils/box_utils.py
new file mode 100644
index 0000000..8fe7284
--- /dev/null
+++ b/mmscan/utils/box_utils.py
@@ -0,0 +1,285 @@
+from typing import List, Union
+
+import numpy as np
+import torch
+
+from mmscan.utils.euler_utils import EulerInstance3DBoxes
+
+try:
+ from pytorch3d.ops import box3d_overlap
+ from pytorch3d.transforms import matrix_to_euler_angles
+except ImportError:
+ box3d_overlap = None
+ matrix_to_euler_angles = None
+
+
+def _axis_angle_rotation(axis: str, angle: np.ndarray) -> np.ndarray:
+ """Return the rotation matrices for one of the rotations about an axis of
+ which Euler angles describe, for each value of the angle given.
+
+ Args:
+ axis: Axis label "X", "Y", or "Z".
+ angle: Euler angles in radians, an array of any shape.
+
+ Returns:
+ Rotation matrices as array of shape (..., 3, 3).
+ """
+
+ cos = np.cos(angle)
+ sin = np.sin(angle)
+ one = np.ones_like(angle)
+ zero = np.zeros_like(angle)
+
+ if axis == 'X':
+ R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
+ elif axis == 'Y':
+ R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
+ elif axis == 'Z':
+ R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
+ else:
+ raise ValueError('letter must be either X, Y or Z.')
+
+ return np.stack(R_flat, -1).reshape(angle.shape + (3, 3))
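+
+
+# ----------------------------------------------------------------------
+# Sketch (illustrative): in the 'ZXY' convention used throughout this
+# module, a 90-degree yaw produced by `euler_angles_to_matrix` below
+# maps the x-axis onto the y-axis:
+#
+#   R = euler_angles_to_matrix(np.array([np.pi / 2, 0.0, 0.0]), 'ZXY')
+#   R @ np.array([1.0, 0.0, 0.0])  # ~ [0.0, 1.0, 0.0]
+# ----------------------------------------------------------------------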
+
+
+def euler_angles_to_matrix(euler_angles: np.ndarray,
+ convention: str) -> np.ndarray:
+ """Convert rotations given as Euler angles in radians to rotation matrices.
+
+ Args:
+ euler_angles: Euler angles in radians as array of shape (..., 3).
+ convention: Convention string of three uppercase letters from
+ {"X", "Y", and "Z"}.
+
+ Returns:
+ Rotation matrices as array of shape (..., 3, 3).
+ """
+ if euler_angles.ndim == 0 or euler_angles.shape[-1] != 3:
+ raise ValueError('Invalid input euler angles.')
+ if len(convention) != 3:
+ raise ValueError('Convention must have 3 letters.')
+ if convention[1] in (convention[0], convention[2]):
+ raise ValueError(f'Invalid convention {convention}.')
+ for letter in convention:
+ if letter not in ('X', 'Y', 'Z'):
+ raise ValueError(f'Invalid letter {letter} in convention string.')
+ matrices = [
+ _axis_angle_rotation(c, e)
+ for c, e in zip(convention, np.split(euler_angles, 3, axis=-1))
+ ]
+ matrices = [x.squeeze(axis=-3) for x in matrices]
+ return np.matmul(np.matmul(matrices[0], matrices[1]), matrices[2])
+
+
+def euler_to_matrix_np(euler: np.ndarray) -> np.ndarray:
+ """Convert rotations given as Euler angles in radians to rotation matrices.
+
+ Args:
+ euler (np.ndarray) : (..., 3)
+
+ Returns:
+ np.ndarray : (..., 3, 3)
+ """
+ # euler: (N, 3) array; the numpy implementation above handles
+ # arrays directly, so no torch round-trip is needed
+ return euler_angles_to_matrix(np.asarray(euler), 'ZXY')
+
+
+def is_inside_box(points, center, size, rotation_mat):
+ """Check if points are inside a 3D bounding box.
+
+ Args:
+ points: 3D points, numpy array of shape (n, 3).
+ center: center of the box, numpy array of shape (3, ).
+ size: size of the box, numpy array of shape (3, ).
+ rotation_mat: rotation matrix of the box,
+ numpy array of shape (3, 3).
+
+ Returns:
+ Boolean array of shape (n, )
+ indicating if each point is inside the box.
+ """
+ assert points.shape[1] == 3, 'points should be of shape (n, 3)'
+ points = np.array(points) # n, 3
+ center = np.array(center) # 3,
+ size = np.array(size) # 3,
+ rotation_mat = np.array(rotation_mat)
+ assert rotation_mat.shape == (
+ 3,
+ 3,
+ ), f'R should be shape (3,3), but got {rotation_mat.shape}'
+ pcd_local = (points - center) @ rotation_mat # n, 3
+ pcd_local = pcd_local / size * 2.0 # scale to [-1, 1] # n, 3
+ pcd_local = abs(pcd_local)
+ return ((pcd_local[:, 0] <= 1)
+ & (pcd_local[:, 1] <= 1)
+ & (pcd_local[:, 2] <= 1))
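+
+
+# ----------------------------------------------------------------------
+# Sketch (illustrative): with an axis-aligned unit cube at the origin,
+# `is_inside_box` keeps the origin and rejects a point outside:
+#
+#   pts = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0]])
+#   is_inside_box(pts, center=np.zeros(3), size=np.ones(3),
+#                 rotation_mat=np.eye(3))  # -> [True, False]
+# ----------------------------------------------------------------------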
+ """ + + bbox = np.array(embodied_scan_bbox) + orientation = euler_to_matrix_np(bbox[np.newaxis, 6:])[0] + position = np.array(bbox[:3]) + size = np.array(bbox[3:6]) + obj_mask = np.array( + is_inside_box(scene_pcd[:, :3], position, size, orientation), + dtype=bool, + ) + obj_pc = scene_pcd[obj_mask] + + # resume the same if there's None + if obj_pc.shape[0] < 1: + return embodied_scan_bbox[:6] + xmin = np.min(obj_pc[:, 0]) + ymin = np.min(obj_pc[:, 1]) + zmin = np.min(obj_pc[:, 2]) + xmax = np.max(obj_pc[:, 0]) + ymax = np.max(obj_pc[:, 1]) + zmax = np.max(obj_pc[:, 2]) + bbox = np.array([ + (xmin + xmax) / 2, + (ymin + ymax) / 2, + (zmin + zmax) / 2, + xmax - xmin, + ymax - ymin, + zmax - zmin, + ]) + return bbox + + +def from_9dof_to_6dof(pcd_data, bbox_): + # that's a kind of loss of information, so we don't recommend + return normalize_box(pcd_data, bbox_) + + +def bbox_to_corners(centers, sizes, rot_mat: torch.Tensor) -> torch.Tensor: + """Transform bbox parameters to the 8 corners. + + Args: + bbox (Tensor): 3D box of shape (N, 6) or (N, 7) or (N, 9). + + Returns: + Tensor: Transformed 3D box of shape (N, 8, 3). + """ + device = centers.device + use_batch = False + if len(centers.shape) == 3: + use_batch = True + batch_size, n_proposals = centers.shape[0], centers.shape[1] + centers = centers.reshape(-1, 3) + sizes = sizes.reshape(-1, 3) + rot_mat = rot_mat.reshape(-1, 3, 3) + + n_box = centers.shape[0] + if use_batch: + assert n_box == batch_size * n_proposals + centers = centers.unsqueeze(1).repeat(1, 8, 1) # shape (N, 8, 3) + half_sizes = sizes.unsqueeze(1).repeat(1, 8, 1) / 2 # shape (N, 8, 3) + eight_corners_x = (torch.tensor([1, 1, 1, 1, -1, -1, -1, -1], + device=device).unsqueeze(0).repeat( + n_box, 1)) # shape (N, 8) + eight_corners_y = (torch.tensor([1, 1, -1, -1, 1, 1, -1, -1], + device=device).unsqueeze(0).repeat( + n_box, 1)) # shape (N, 8) + eight_corners_z = (torch.tensor([1, -1, -1, 1, 1, -1, -1, 1], + device=device).unsqueeze(0).repeat( + n_box, 1)) # shape (N, 8) + eight_corners = torch.stack( + (eight_corners_x, eight_corners_y, eight_corners_z), + dim=-1) # shape (N, 8, 3) + eight_corners = eight_corners * half_sizes # shape (N, 8, 3) + # rot_mat: (N, 3, 3), eight_corners: (N, 8, 3) + rotated_corners = torch.matmul(eight_corners, + rot_mat.transpose(1, 2)) # shape (N, 8, 3) + res = centers + rotated_corners + if use_batch: + res = res.reshape(batch_size, n_proposals, 8, 3) + return res + + +def euler_iou3d_corners(boxes1, boxes2): + rows = boxes1.shape[0] + cols = boxes2.shape[0] + if rows * cols == 0: + return boxes1.new(rows, cols) + + _, iou3d = box3d_overlap(boxes1, boxes2) + return iou3d + + +def euler_iou3d_bbox(center1, size1, rot1, center2, size2, rot2): + """Calculate the 3D IoU between two grounps of 9DOF bounding boxes. + + Args: + center1 (Tensor): (n, cx, cy, cz) of grounp1. + size1 (Tensor): (n, l, w, h) of grounp1. + rot1 (Tensor): rot matrix of grounp1. + center1 (Tensor): (m, cx, cy, cz) of grounp2. + size1 (Tensor): (m, l, w, h) of grounp2. + rot1 (Tensor): rot matrix of grounp2. + + Returns: + numpy.ndarray: (n, m) the 3D IoU. 
+ """ + if torch.cuda.is_available(): + center1 = center1.cuda() + size1 = size1.cuda() + rot1 = rot1.cuda() + center2 = center2.cuda() + size2 = size2.cuda() + rot2 = rot2.cuda() + corners1 = bbox_to_corners(center1, size1, rot1) + corners2 = bbox_to_corners(center2, size2, rot2) + result = euler_iou3d_corners(corners1, corners2) + + if torch.cuda.is_available(): + result = result.detach().cpu() + return result.numpy() + + +def index_box(boxes: List[torch.tensor], + indices: Union[List[torch.tensor], torch.tensor])\ + -> Union[List[torch.tensor], torch.tensor]: + """Convert a grounp of bounding boxes represented in [center, size, rot] + format to 9 DoF format. + + Args: + box (list/tuple, tensor): boxes in a grounp. + + Returns: + Tensor : 9 DoF format. (num,9) + """ + if isinstance(boxes, (list, tuple)): + return [index_box(box, indices) for box in boxes] + else: + return boxes[indices] + + +def to_9dof_box(box: List[torch.tensor]): + """Convert a grounp of bounding boxes represented in [center, size, rot] + format to 9 DoF format. + + Args: + box (list/tuple, tensor): boxes in a grounp. + + Returns: + Tensor : 9 DoF format. (num,9) + """ + if isinstance(box, (list, tuple)): + center, size, rotmat = box + euler = matrix_to_euler_angles(rotmat, 'ZXY') + box = torch.concat([center, size, euler], dim=-1) + return EulerInstance3DBoxes(box, origin=(0.5, 0.5, 0.5)) diff --git a/mmscan/utils/data_io.py b/mmscan/utils/data_io.py new file mode 100644 index 0000000..6c08c1e --- /dev/null +++ b/mmscan/utils/data_io.py @@ -0,0 +1,207 @@ +import json +import os + +import numpy as np +from tqdm import tqdm + + +def load_json(path: str): + """Check the path and read the json file. + + Args: + path (str): the path of the json file. + Returns: + the data in the json file. + """ + assert os.path.exists(path) + with open(path, 'r') as f: + data = json.load(f) + return data + + +def read_annotation_pickle(path: str, show_progress: bool = True): + """Read annotation pickle file and return a dictionary, the embodiedscan + annotation for all scans in the split. + + Args: + path (str): the path of the annotation pickle file. + show_progress (bool): whether showing the progress. + Returns: + dict: A dictionary. 
+ scene_id : (bboxes, object_ids, object_types, + visible_view_object_dict, extrinsics_c2w, + axis_align_matrix, intrinsics, image_paths) + + bboxes: numpy array of bounding boxes, + shape (N, 9): xyz, lwh, ypr + object_ids: numpy array of obj ids, shape (N,) + object_types: list of strings, each string is a type + of object + visible_view_object_dict: a dictionary + {view_id: visible_instance_ids} + extrinsics_c2w: a list of 4x4 matrices, each matrix is + the extrinsic matrix of a view + axis_align_matrix: a 4x4 matrix, the axis-aligned matrix + of the scene + intrinsics: a list of 4x4 matrices, each matrix is the + intrinsic matrix of a view + image_paths: a list of strings, each string is the path + of an image in the scene + """ + with open(path, 'rb') as f: + data = np.load(f, allow_pickle=True) + + metainfo = data['metainfo'] + object_type_to_int = metainfo['categories'] + object_int_to_type = {v: k for k, v in object_type_to_int.items()} + datalist = data['data_list'] + output_data = {} + pbar = (tqdm(range(len(datalist))) if show_progress else range( + len(datalist))) + for scene_idx in pbar: + images = datalist[scene_idx]['images'] + + intrinsic = datalist[scene_idx].get('cam2img', None) # a 4x4 matrix + missing_intrinsic = False + if intrinsic is None: + missing_intrinsic = ( + True # each view has different intrinsic for mp3d + ) + depth_intrinsic = datalist[scene_idx].get( + 'cam2depth', None) # a 4x4 matrix, for 3rscan + if depth_intrinsic is None and not missing_intrinsic: + depth_intrinsic = datalist[scene_idx][ + 'depth_cam2img'] # a 4x4 matrix, for scannet + axis_align_matrix = datalist[scene_idx][ + 'axis_align_matrix'] # a 4x4 matrix + + scene_id = datalist[scene_idx]['sample_idx'] + + instances = datalist[scene_idx]['instances'] + bboxes = [] + object_ids = [] + object_types = [] + object_type_ints = [] + for object_idx in range(len(instances)): + bbox_3d = instances[object_idx]['bbox_3d'] # list of 9 values + bbox_label_3d = instances[object_idx]['bbox_label_3d'] # int + bbox_id = instances[object_idx]['bbox_id'] # int + object_type = object_int_to_type[bbox_label_3d] + + object_type_ints.append(bbox_label_3d) + object_types.append(object_type) + bboxes.append(bbox_3d) + object_ids.append(bbox_id) + bboxes = np.array(bboxes) + object_ids = np.array(object_ids) + object_type_ints = np.array(object_type_ints) + + visible_view_object_dict = {} + visible_view_object_list = [] + extrinsics_c2w = [] + intrinsics = [] + depth_intrinsics = [] + image_paths = [] + depth_image_paths = [] + + for image_idx in range(len(images)): + img_path = images[image_idx]['img_path'] # str + depth_image = images[image_idx]['depth_path'] + extrinsic_id = img_path.split('/')[-1].split('.')[0] # str + cam2global = images[image_idx]['cam2global'] # a 4x4 matrix + + if missing_intrinsic: + intrinsic = images[image_idx]['cam2img'] + + depth_intrinsic = images[image_idx]['cam2img'] + visible_instance_indices = images[image_idx][ + 'visible_instance_ids'] # numpy array of int + visible_instance_ids = object_ids[visible_instance_indices] + visible_view_object_dict[extrinsic_id] = visible_instance_ids + visible_view_object_list.append(visible_instance_ids) + extrinsics_c2w.append(cam2global) + intrinsics.append(intrinsic) + depth_intrinsics.append(depth_intrinsic) + image_paths.append(img_path) + depth_image_paths.append(depth_image) + if show_progress: + pbar.set_description(f'Processing scene {scene_id}') + output_data[scene_id] = { + # object level + 'bboxes': bboxes, + 'object_ids': object_ids, + 
'object_types': object_types, + 'object_type_ints': object_type_ints, + # image level + 'visible_instance_ids': visible_view_object_list, + 'visible_view_object_dict': visible_view_object_dict, + 'extrinsics_c2w': extrinsics_c2w, + 'axis_align_matrix': axis_align_matrix, + 'intrinsics': intrinsics, + 'depth_intrinsics': depth_intrinsics, + 'image_paths': image_paths, + 'depth_image_paths': depth_image_paths, + } + return output_data + + +class id_mapping: + """We rename the scan for consistency. + + This class is used to map the original scan names to the new names. + Args: + mp3d_mapping_path (str): the path of the mp3d mapping file. + """ + + def __init__(self, mp3d_mapping_path: str): + + def reverse_dict(mapping): + re_mapping = {mapping[k]: k for k in mapping.keys()} + return re_mapping + + with open(mp3d_mapping_path, 'r') as f: + self.mp3d_mapping = json.load(f) + + self.mp3d_mapping_trans = reverse_dict(self.mp3d_mapping) + + def forward(self, scan_name: str): + """map forward the original scan names to the new names. + + Args: + scan_name (str): the original scan name. + Returns: + str: the new name. + """ + + if 'matterport3d/' in scan_name: + scan_, region_ = ( + self.mp3d_mapping[scan_name.split('/')[1]], + scan_name.split('/')[2], + ) + return scan_ + '_' + region_ + elif '3rscan' in scan_name: + return scan_name.split('/')[1] + elif 'scannet' in scan_name: + return scan_name.split('/')[1] + else: + raise ValueError(f'{scan_name} is not a scan name') + + def backward(self, scan_name: str): + """map backward the new names to the original scan names. + + Args: + scan_name (str): the new name. + Returns: + str: the original scan name. + """ + if '1mp3d' in scan_name: + scene1, scene2, region = scan_name.split('_') + return ('matterport3d/' + + self.mp3d_mapping_trans[scene1 + '_' + scene2] + '/' + + region) + elif '3rscan' in scan_name: + return '3rscan/' + scan_name + elif 'scene' in scan_name: + return 'scannet/' + scan_name + else: + raise ValueError(f'{scan_name} is not a scan name') diff --git a/mmscan/utils/euler_utils.py b/mmscan/utils/euler_utils.py new file mode 100644 index 0000000..4f078b4 --- /dev/null +++ b/mmscan/utils/euler_utils.py @@ -0,0 +1,850 @@ +# Copyright (c) OpenMMLab. All rights reserved. +from abc import abstractmethod +from typing import Iterator, Optional, Sequence, Tuple, Union + +import numpy as np +import torch + +try: + from pytorch3d.ops import box3d_overlap + from pytorch3d.transforms import (euler_angles_to_matrix, + matrix_to_euler_angles) +except ImportError: + box3d_overlap = None + euler_angles_to_matrix = None + matrix_to_euler_angles = None +from torch import Tensor + + +class BaseInstance3DBoxes: + """Base class for 3D Boxes. + + Note: + The box is bottom centered, i.e. the relative position of origin in the + box is (0.5, 0.5, 0). + + Args: + tensor (Tensor or np.ndarray or Sequence[Sequence[float]]): The boxes + data with shape (N, box_dim). + box_dim (int): Number of the dimension of a box. Each row is + (x, y, z, x_size, y_size, z_size, yaw). Defaults to 7. + with_yaw (bool): Whether the box is with yaw rotation. If False, the + value of yaw will be set to 0 as minmax boxes. Defaults to True. + origin (Tuple[float]): Relative position of the box origin. + Defaults to (0.5, 0.5, 0). This will guide the box be converted to + (0.5, 0.5, 0) mode. + + Attributes: + tensor (Tensor): Float matrix with shape (N, box_dim). + box_dim (int): Integer indicating the dimension of a box. Each row is + (x, y, z, x_size, y_size, z_size, yaw, ...). 
+ with_yaw (bool): If True, the value of yaw will be set to 0 as minmax + boxes. + """ + + YAW_AXIS: int = 0 + + def __init__( + self, + tensor: Union[Tensor, np.ndarray, Sequence[Sequence[float]]], + box_dim: int = 7, + with_yaw: bool = True, + origin: Tuple[float, float, float] = (0.5, 0.5, 0) + ) -> None: + if isinstance(tensor, Tensor): + device = tensor.device + else: + device = torch.device('cpu') + tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device) + if tensor.numel() == 0: + # Use reshape, so we don't end up creating a new tensor that does + # not depend on the inputs (and consequently confuses jit) + tensor = tensor.reshape((-1, box_dim)) + assert tensor.dim() == 2 and tensor.size(-1) == box_dim, \ + ('The box dimension must be 2 and the length of the last ' + f'dimension must be {box_dim}, but got boxes with shape ' + f'{tensor.shape}.') + + if tensor.shape[-1] == 6: + # If the dimension of boxes is 6, we expand box_dim by padding 0 as + # a fake yaw and set with_yaw to False + assert box_dim == 6 + fake_rot = tensor.new_zeros(tensor.shape[0], 1) + tensor = torch.cat((tensor, fake_rot), dim=-1) + self.box_dim = box_dim + 1 + self.with_yaw = False + else: + self.box_dim = box_dim + self.with_yaw = with_yaw + self.tensor = tensor.clone() + + if origin != (0.5, 0.5, 0): + dst = self.tensor.new_tensor((0.5, 0.5, 0)) + src = self.tensor.new_tensor(origin) + self.tensor[:, :3] += self.tensor[:, 3:6] * (dst - src) + + @property + def shape(self) -> torch.Size: + """torch.Size: Shape of boxes.""" + return self.tensor.shape + + @property + def volume(self) -> Tensor: + """Tensor: A vector with volume of each box in shape (N, ).""" + return self.tensor[:, 3] * self.tensor[:, 4] * self.tensor[:, 5] + + @property + def dims(self) -> Tensor: + """Tensor: Size dimensions of each box in shape (N, 3).""" + return self.tensor[:, 3:6] + + @property + def yaw(self) -> Tensor: + """Tensor: A vector with yaw of each box in shape (N, ).""" + return self.tensor[:, 6] + + @property + def height(self) -> Tensor: + """Tensor: A vector with height of each box in shape (N, ).""" + return self.tensor[:, 5] + + @property + def top_height(self) -> Tensor: + """Tensor: A vector with top height of each box in shape (N, ).""" + return self.bottom_height + self.height + + @property + def bottom_height(self) -> Tensor: + """Tensor: A vector with bottom height of each box in shape (N, ).""" + return self.tensor[:, 2] + + @property + def center(self) -> Tensor: + """Calculate the center of all the boxes. + + Note: + In MMDetection3D's convention, the bottom center is usually taken + as the default center. + + The relative position of the centers in different kinds of boxes + are different, e.g., the relative center of a boxes is + (0.5, 1.0, 0.5) in camera and (0.5, 0.5, 0) in lidar. It is + recommended to use ``bottom_center`` or ``gravity_center`` for + clearer usage. + + Returns: + Tensor: A tensor with center of each box in shape (N, 3). 
+ """ + return self.bottom_center + + @property + def bottom_center(self) -> Tensor: + """Tensor: A tensor with center of each box in shape (N, 3).""" + return self.tensor[:, :3] + + @property + def gravity_center(self) -> Tensor: + """Tensor: A tensor with center of each box in shape (N, 3).""" + bottom_center = self.bottom_center + gravity_center = torch.zeros_like(bottom_center) + gravity_center[:, :2] = bottom_center[:, :2] + gravity_center[:, 2] = bottom_center[:, 2] + self.tensor[:, 5] * 0.5 + return gravity_center + + @property + def corners(self) -> Tensor: + """Tensor: A tensor with 8 corners of each box in shape (N, 8, 3).""" + pass + + @property + def bev(self) -> Tensor: + """Tensor: 2D BEV box of each box with rotation in XYWHR format, in + shape (N, 5).""" + return self.tensor[:, [0, 1, 3, 4, 6]] + + def in_range_bev( + self, box_range: Union[Tensor, np.ndarray, + Sequence[float]]) -> Tensor: + """Check whether the boxes are in the given range. + + Args: + box_range (Tensor or np.ndarray or Sequence[float]): The range of + box in order of (x_min, y_min, x_max, y_max). + + Note: + The original implementation of SECOND checks whether boxes in a + range by checking whether the points are in a convex polygon, we + reduce the burden for simpler cases. + + Returns: + Tensor: A binary vector indicating whether each box is inside the + reference range. + """ + in_range_flags = ((self.bev[:, 0] > box_range[0]) + & (self.bev[:, 1] > box_range[1]) + & (self.bev[:, 0] < box_range[2]) + & (self.bev[:, 1] < box_range[3])) + return in_range_flags + + @abstractmethod + def rotate( + self, + angle: Union[Tensor, np.ndarray, float], + points: Optional[Union[Tensor, np.ndarray]] = None + ) -> Union[Tuple[Tensor, Tensor], Tuple[np.ndarray, np.ndarray], + Tuple[Tensor], None]: + """Rotate boxes with points (optional) with the given angle or rotation + matrix. + + Args: + angle (Tensor or np.ndarray or float): Rotation angle or rotation + matrix. + points (Tensor or np.ndarray or :obj:``, optional): + Points to rotate. Defaults to None. + + Returns: + tuple or None: When ``points`` is None, the function returns None, + otherwise it returns the rotated points and the rotation matrix + ``rot_mat_T``. + """ + pass + + @abstractmethod + def flip( + self, + bev_direction: str = 'horizontal', + points: Optional[Union[Tensor, np.ndarray, ]] = None + ) -> Union[Tensor, np.ndarray, None]: + """Flip the boxes in BEV along given BEV direction. + + Args: + bev_direction (str): Direction by which to flip. Can be chosen from + 'horizontal' and 'vertical'. Defaults to 'horizontal'. + points (Tensor or np.ndarray or :obj:``, optional): + Points to flip. Defaults to None. + + Returns: + Tensor or np.ndarray or :obj:`` or None: When ``points`` + is None, the function returns None, otherwise it returns the + flipped points. + """ + pass + + def translate(self, trans_vector: Union[Tensor, np.ndarray]) -> None: + """Translate boxes with the given translation vector. + + Args: + trans_vector (Tensor or np.ndarray): Translation vector of size + 1x3. + """ + if not isinstance(trans_vector, Tensor): + trans_vector = self.tensor.new_tensor(trans_vector) + self.tensor[:, :3] += trans_vector + + def in_range_3d( + self, box_range: Union[Tensor, np.ndarray, + Sequence[float]]) -> Tensor: + """Check whether the boxes are in the given range. + + Args: + box_range (Tensor or np.ndarray or Sequence[float]): The range of + box (x_min, y_min, z_min, x_max, y_max, z_max). 
+ + Note: + In the original implementation of SECOND, checking whether a box in + the range checks whether the points are in a convex polygon, we try + to reduce the burden for simpler cases. + + Returns: + Tensor: A binary vector indicating whether each point is inside the + reference range. + """ + in_range_flags = ((self.tensor[:, 0] > box_range[0]) + & (self.tensor[:, 1] > box_range[1]) + & (self.tensor[:, 2] > box_range[2]) + & (self.tensor[:, 0] < box_range[3]) + & (self.tensor[:, 1] < box_range[4]) + & (self.tensor[:, 2] < box_range[5])) + return in_range_flags + + @abstractmethod + def convert_to(self, + dst: int, + rt_mat: Optional[Union[Tensor, np.ndarray]] = None, + correct_yaw: bool = False) -> 'BaseInstance3DBoxes': + """Convert self to ``dst`` mode. + + Args: + dst (int): The target Box mode. + rt_mat (Tensor or np.ndarray, optional): The rotation and + translation matrix between different coordinates. + Defaults to None. The conversion from ``src`` coordinates to + ``dst`` coordinates usually comes along the change of sensors, + e.g., from camera to LiDAR. This requires a transformation + matrix. + correct_yaw (bool): Whether to convert the yaw angle to the target + coordinate. Defaults to False. + + Returns: + :obj:`BaseInstance3DBoxes`: The converted box of the same type in + the ``dst`` mode. + """ + pass + + def scale(self, scale_factor: float) -> None: + """Scale the box with horizontal and vertical scaling factors. + + Args: + scale_factors (float): Scale factors to scale the boxes. + """ + self.tensor[:, :6] *= scale_factor + self.tensor[:, 7:] *= scale_factor # velocity + + def nonempty(self, threshold: float = 0.0) -> Tensor: + """Find boxes that are non-empty. + + A box is considered empty if either of its side is no larger than + threshold. + + Args: + threshold (float): The threshold of minimal sizes. Defaults to 0.0. + + Returns: + Tensor: A binary vector which represents whether each box is empty + (False) or non-empty (True). + """ + box = self.tensor + size_x = box[..., 3] + size_y = box[..., 4] + size_z = box[..., 5] + keep = ((size_x > threshold) + & (size_y > threshold) & (size_z > threshold)) + return keep + + def __getitem__( + self, item: Union[int, slice, np.ndarray, + Tensor]) -> 'BaseInstance3DBoxes': + """ + Args: + item (int or slice or np.ndarray or Tensor): Index of boxes. + + Note: + The following usage are allowed: + + 1. `new_boxes = boxes[3]`: Return a `Boxes` that contains only one + box. + 2. `new_boxes = boxes[2:10]`: Return a slice of boxes. + 3. `new_boxes = boxes[vector]`: Where vector is a + torch.BoolTensor with `length = len(boxes)`. Nonzero elements in + the vector will be selected. + + Note that the returned Boxes might share storage with this Boxes, + subject to PyTorch's indexing semantics. + + Returns: + :obj:`BaseInstance3DBoxes`: A new object of + :class:`BaseInstance3DBoxes` after indexing. + """ + original_type = type(self) + if isinstance(item, int): + return original_type(self.tensor[item].view(1, -1), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + b = self.tensor[item] + assert b.dim() == 2, \ + f'Indexing on Boxes with {item} failed to return a matrix!' 
+ return original_type(b, box_dim=self.box_dim, with_yaw=self.with_yaw) + + def __len__(self) -> int: + """int: Number of boxes in the current object.""" + return self.tensor.shape[0] + + def __repr__(self) -> str: + """str: Return a string that describes the object.""" + return self.__class__.__name__ + '(\n ' + str(self.tensor) + ')' + + @classmethod + def cat(cls, boxes_list: Sequence['BaseInstance3DBoxes'] + ) -> 'BaseInstance3DBoxes': + """Concatenate a list of Boxes into a single Boxes. + + Args: + boxes_list (Sequence[:obj:`BaseInstance3DBoxes`]): List of boxes. + + Returns: + :obj:`BaseInstance3DBoxes`: The concatenated boxes. + """ + assert isinstance(boxes_list, (list, tuple)) + if len(boxes_list) == 0: + return cls(torch.empty(0)) + assert all(isinstance(box, cls) for box in boxes_list) + + # use torch.cat (v.s. layers.cat) + # so the returned boxes never share storage with input + cat_boxes = cls(torch.cat([b.tensor for b in boxes_list], dim=0), + box_dim=boxes_list[0].box_dim, + with_yaw=boxes_list[0].with_yaw) + return cat_boxes + + def numpy(self) -> np.ndarray: + """Reload ``numpy`` from self.tensor.""" + return self.tensor.numpy() + + def to(self, device: Union[str, torch.device], *args, + **kwargs) -> 'BaseInstance3DBoxes': + """Convert current boxes to a specific device. + + Args: + device (str or :obj:`torch.device`): The name of the device. + + Returns: + :obj:`BaseInstance3DBoxes`: A new boxes object on the specific + device. + """ + original_type = type(self) + return original_type(self.tensor.to(device, *args, **kwargs), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + + def cpu(self) -> 'BaseInstance3DBoxes': + """Convert current boxes to cpu device. + + Returns: + :obj:`BaseInstance3DBoxes`: A new boxes object on the cpu device. + """ + original_type = type(self) + return original_type(self.tensor.cpu(), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + + def cuda(self, *args, **kwargs) -> 'BaseInstance3DBoxes': + """Convert current boxes to cuda device. + + Returns: + :obj:`BaseInstance3DBoxes`: A new boxes object on the cuda device. + """ + original_type = type(self) + return original_type(self.tensor.cuda(*args, **kwargs), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + + def clone(self) -> 'BaseInstance3DBoxes': + """Clone the boxes. + + Returns: + :obj:`BaseInstance3DBoxes`: Box object with the same properties as + self. + """ + original_type = type(self) + return original_type(self.tensor.clone(), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + + def detach(self) -> 'BaseInstance3DBoxes': + """Detach the boxes. + + Returns: + :obj:`BaseInstance3DBoxes`: Box object with the same properties as + self. + """ + original_type = type(self) + return original_type(self.tensor.detach(), + box_dim=self.box_dim, + with_yaw=self.with_yaw) + + @property + def device(self) -> torch.device: + """torch.device: The device of the boxes are on.""" + return self.tensor.device + + def __iter__(self) -> Iterator[Tensor]: + """Yield a box as a Tensor at a time. + + Returns: + Iterator[Tensor]: A box of shape (box_dim, ). + """ + yield from self.tensor + + @classmethod + def height_overlaps(cls, boxes1: 'BaseInstance3DBoxes', + boxes2: 'BaseInstance3DBoxes') -> Tensor: + """Calculate height overlaps of two boxes. + + Note: + This function calculates the height overlaps between ``boxes1`` and + ``boxes2``, ``boxes1`` and ``boxes2`` should be in the same type. + + Args: + boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1 contain N boxes. 
+    @classmethod
+    def height_overlaps(cls, boxes1: 'BaseInstance3DBoxes',
+                        boxes2: 'BaseInstance3DBoxes') -> Tensor:
+        """Calculate height overlaps of two boxes.
+
+        Note:
+            This function calculates the height overlaps between ``boxes1``
+            and ``boxes2``; ``boxes1`` and ``boxes2`` should be of the same
+            type.
+
+        Args:
+            boxes1 (:obj:`BaseInstance3DBoxes`): Boxes 1, containing N boxes.
+            boxes2 (:obj:`BaseInstance3DBoxes`): Boxes 2, containing M boxes.
+
+        Returns:
+            Tensor: Calculated height overlap of the boxes.
+        """
+        assert isinstance(boxes1, BaseInstance3DBoxes)
+        assert isinstance(boxes2, BaseInstance3DBoxes)
+        assert type(boxes1) == type(boxes2), \
+            '"boxes1" and "boxes2" should be of the same type, ' \
+            f'but got {type(boxes1)} and {type(boxes2)}.'
+
+        boxes1_top_height = boxes1.top_height.view(-1, 1)
+        boxes1_bottom_height = boxes1.bottom_height.view(-1, 1)
+        boxes2_top_height = boxes2.top_height.view(1, -1)
+        boxes2_bottom_height = boxes2.bottom_height.view(1, -1)
+
+        highest_of_bottom = torch.max(boxes1_bottom_height,
+                                      boxes2_bottom_height)
+        lowest_of_top = torch.min(boxes1_top_height, boxes2_top_height)
+        overlaps_h = torch.clamp(lowest_of_top - highest_of_bottom, min=0)
+        return overlaps_h
+
+    def new_box(
+        self, data: Union[Tensor, np.ndarray, Sequence[Sequence[float]]]
+    ) -> 'BaseInstance3DBoxes':
+        """Create a new box object with data.
+
+        The new box and its tensor have the same properties as self and
+        self.tensor, respectively.
+
+        Args:
+            data (Tensor or np.ndarray or Sequence[Sequence[float]]): Data to
+                be copied.
+
+        Returns:
+            :obj:`BaseInstance3DBoxes`: A new bbox object with ``data``; the
+            object's other properties are similar to ``self``.
+        """
+        new_tensor = self.tensor.new_tensor(data) \
+            if not isinstance(data, Tensor) else data.to(self.device)
+        original_type = type(self)
+        return original_type(new_tensor,
+                             box_dim=self.box_dim,
+                             with_yaw=self.with_yaw)
+
+
+class EulerInstance3DBoxes(BaseInstance3DBoxes):
+    """3D boxes with orientation represented by three Euler angles.
+
+    See https://en.wikipedia.org/wiki/Euler_angles for the definition of
+    Euler angles.
+
+    Attributes:
+        tensor (torch.Tensor): Float matrix of N x box_dim.
+        box_dim (int): Integer indicating the dimension of a box. Each row is
+            (x, y, z, x_size, y_size, z_size, alpha, beta, gamma).
+    """
+
+    def __init__(self, tensor, box_dim=9, origin=(0.5, 0.5, 0.5)):
+        if isinstance(tensor, torch.Tensor):
+            device = tensor.device
+        else:
+            device = torch.device('cpu')
+        tensor = torch.as_tensor(tensor, dtype=torch.float32, device=device)
+        if tensor.numel() == 0:
+            # Use reshape, so we don't end up creating a new tensor that
+            # does not depend on the inputs (and consequently confuses jit)
+            tensor = tensor.reshape((0, box_dim)).to(dtype=torch.float32,
+                                                     device=device)
+        assert tensor.dim() == 2 and tensor.size(-1) == box_dim, tensor.size()
+
+        if tensor.shape[-1] == 6:
+            # If the dimension of boxes is 6, we expand box_dim by padding
+            # (0, 0, 0) as fake Euler angles.
+            assert box_dim == 6
+            fake_rot = tensor.new_zeros(tensor.shape[0], 3)
+            tensor = torch.cat((tensor, fake_rot), dim=-1)
+            self.box_dim = box_dim + 3
+        elif tensor.shape[-1] == 7:
+            assert box_dim == 7
+            fake_euler = tensor.new_zeros(tensor.shape[0], 2)
+            tensor = torch.cat((tensor, fake_euler), dim=-1)
+            self.box_dim = box_dim + 2
+        else:
+            assert tensor.shape[-1] == 9
+            self.box_dim = box_dim
+        self.tensor = tensor.clone()
+
+        self.origin = origin
+        if origin != (0.5, 0.5, 0.5):
+            dst = self.tensor.new_tensor((0.5, 0.5, 0.5))
+            src = self.tensor.new_tensor(origin)
+            self.tensor[:, :3] += self.tensor[:, 3:6] * (dst - src)
+
+    def get_corners(self, tensor1):
+        """Convert boxes to corners in clockwise order, in the form of
+        ``(x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0)``.
+
+        .. code-block:: none
+
+                                               up z
+                                front y           ^
+                                     /            |
+                                    /             |
+                      (x0, y1, z1) + -----------  + (x1, y1, z1)
+                                  /|            / |
+                                 / |           /  |
+                   (x0, y0, z1) + ----------- +   + (x1, y1, z0)
+                                |  /      .   |  /
+                                | / origin    | /
+                   (x0, y0, z0) + ----------- + --------> right x
+                                              (x1, y0, z0)
+
+        Args:
+            tensor1 (torch.Tensor): Box tensor of shape (N, 9).
+
+        Returns:
+            torch.Tensor: Coordinates of corners of all the boxes in
+            shape (N, 8, 3).
+        """
+        if tensor1.numel() == 0:
+            return torch.empty([0, 8, 3], device=tensor1.device)
+
+        dims = tensor1[:, 3:6]
+        corners_norm = torch.from_numpy(
+            np.stack(np.unravel_index(np.arange(8), [2] * 3),
+                     axis=1)).to(device=dims.device, dtype=dims.dtype)
+
+        corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
+        # use relative origin
+        assert self.origin == (0.5, 0.5, 0.5), \
+            'self.origin != (0.5, 0.5, 0.5) needs to be checked!'
+        corners_norm = corners_norm - dims.new_tensor(self.origin)
+        corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3])
+
+        # rotate
+        corners = rotation_3d_in_euler(corners, tensor1[:, 6:])
+
+        corners += tensor1[:, :3].view(-1, 1, 3)
+        return corners
+
+    @classmethod
+    def overlaps(cls, boxes1, boxes2, mode='iou', eps=1e-4):
+        """Calculate 3D overlaps of two boxes.
+
+        Note:
+            This function calculates the overlaps between ``boxes1`` and
+            ``boxes2``; ``boxes1`` and ``boxes2`` should be of the same type.
+
+        Args:
+            boxes1 (:obj:`EulerInstance3DBoxes`): Boxes 1, containing N boxes.
+            boxes2 (:obj:`EulerInstance3DBoxes`): Boxes 2, containing M boxes.
+            mode (str): Mode of iou calculation. Defaults to 'iou'.
+            eps (float): Epsilon. Defaults to 1e-4.
+
+        Returns:
+            torch.Tensor: Calculated 3D overlaps of the boxes.
+        """
+        assert isinstance(boxes1, EulerInstance3DBoxes)
+        assert isinstance(boxes2, EulerInstance3DBoxes)
+        assert type(boxes1) == type(boxes2), '"boxes1" and "boxes2" should ' \
+            f'be of the same type, got {type(boxes1)} and {type(boxes2)}.'
+
+        assert mode in ['iou']
+
+        rows = len(boxes1)
+        cols = len(boxes2)
+        if rows * cols == 0:
+            return boxes1.tensor.new(rows, cols)
+
+        corners1 = boxes1.corners
+        corners2 = boxes2.corners
+        _, iou3d = box3d_overlap(corners1, corners2, eps=eps)
+        return iou3d
+
+    @property
+    def gravity_center(self):
+        """torch.Tensor: A tensor with the center of each box in shape
+        (N, 3)."""
+        return self.tensor[:, :3]
+
+    @property
+    def corners(self):
+        """torch.Tensor: Coordinates of corners of all the boxes
+        in shape (N, 8, 3).
+
+        Convert the boxes to corners in clockwise order, in the form of
+        ``(x0y0z0, x0y0z1, x0y1z1, x0y1z0, x1y0z0, x1y0z1, x1y1z1, x1y1z0)``.
+
+        .. code-block:: none
+
+                                               up z
+                                front y           ^
+                                     /            |
+                                    /             |
+                      (x0, y1, z1) + -----------  + (x1, y1, z1)
+                                  /|            / |
+                                 / |           /  |
+                   (x0, y0, z1) + ----------- +   + (x1, y1, z0)
+                                |  /      .   |  /
+                                | / origin    | /
+                   (x0, y0, z0) + ----------- + --------> right x
+                                              (x1, y0, z0)
+        """
+        if self.tensor.numel() == 0:
+            return torch.empty([0, 8, 3], device=self.tensor.device)
+
+        dims = self.dims
+        corners_norm = torch.from_numpy(
+            np.stack(np.unravel_index(np.arange(8), [2] * 3),
+                     axis=1)).to(device=dims.device, dtype=dims.dtype)
+
+        corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]]
+        # use relative origin
+        assert self.origin == (0.5, 0.5, 0.5), \
+            'self.origin != (0.5, 0.5, 0.5) needs to be checked!'
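+        # The unit-cube corners from ``np.unravel_index`` above are shifted
+        # by the relative origin and scaled by the per-box dimensions below;
+        # e.g. a unit cube centered at the origin yields corners at
+        # (+-0.5, +-0.5, +-0.5) before rotation and translation.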
+        corners_norm = corners_norm - dims.new_tensor(self.origin)
+        corners = dims.view([-1, 1, 3]) * corners_norm.reshape([1, 8, 3])
+
+        # rotate
+        corners = rotation_3d_in_euler(corners, self.tensor[:, 6:])
+
+        corners += self.tensor[:, :3].view(-1, 1, 3)
+        return corners
+
+    def transform(self, matrix):
+        """Apply a 4x4 homogeneous transformation matrix to the boxes in
+        place, updating both the centers and the Euler orientations."""
+        if self.tensor.shape[0] == 0:
+            return
+        if not isinstance(matrix, torch.Tensor):
+            matrix = self.tensor.new_tensor(matrix)
+        points = self.tensor[:, :3]
+        constant = points.new_ones(points.shape[0], 1)
+        points_extend = torch.concat([points, constant], dim=-1)
+        points_trans = torch.matmul(points_extend,
+                                    matrix.transpose(-2, -1))[:, :3]
+
+        size = self.tensor[:, 3:6]
+
+        # angle_delta = matrix_to_euler_angles(matrix[:3,:3], 'ZXY')
+        # angle = self.tensor[:,6:] + angle_delta
+        ori_matrix = euler_angles_to_matrix(self.tensor[:, 6:], 'ZXY')
+        rot_matrix = matrix[:3, :3].expand_as(ori_matrix)
+        final = torch.bmm(rot_matrix, ori_matrix)
+        angle = matrix_to_euler_angles(final, 'ZXY')
+
+        self.tensor = torch.cat([points_trans, size, angle], dim=-1)
+
+    def scale(self, scale_factor: float) -> None:
+        """Scale the box with horizontal and vertical scaling factors.
+
+        Args:
+            scale_factor (float): Scale factor to scale the boxes.
+        """
+        self.tensor[:, :6] *= scale_factor
+
+    def rotate(self, angle, points=None):
+        """Rotate boxes with points (optional) with the given angle or
+        rotation matrix.
+
+        Args:
+            angle (float | torch.Tensor | np.ndarray):
+                Rotation angle or rotation matrix.
+            points (torch.Tensor | np.ndarray | :obj:`BasePoints`, optional):
+                Points to rotate. Defaults to None.
+
+        Returns:
+            tuple or None: When ``points`` is None, the function returns
+            None, otherwise it returns the rotated points and the
+            rotation matrix ``rot_mat_T``.
+        """
+        if not isinstance(angle, torch.Tensor):
+            angle = self.tensor.new_tensor(angle)
+
+        if angle.numel() == 1:  # only given yaw angle for rotation
+            angle = self.tensor.new_tensor([angle, 0., 0.])
+            rot_matrix = euler_angles_to_matrix(angle, 'ZXY')
+        elif angle.numel() == 3:
+            rot_matrix = euler_angles_to_matrix(angle, 'ZXY')
+        elif angle.shape == torch.Size([3, 3]):
+            rot_matrix = angle
+        else:
+            raise NotImplementedError
+
+        rot_mat_T = rot_matrix.T
+        # keep the transform on the same device as the box tensor
+        transform_matrix = torch.eye(4, device=self.tensor.device)
+        transform_matrix[:3, :3] = rot_matrix
+        self.transform(transform_matrix)
+
+        if points is not None:
+            if isinstance(points, torch.Tensor):
+                points[:, :3] = points[:, :3] @ rot_mat_T
+            elif isinstance(points, np.ndarray):
+                rot_mat_T = rot_mat_T.cpu().numpy()
+                points[:, :3] = np.dot(points[:, :3], rot_mat_T)
+            elif isinstance(points, BasePoints):
+                points.rotate(rot_mat_T)
+            else:
+                raise ValueError
+            return points, rot_mat_T
+        else:
+            return rot_mat_T
+
+    def flip(self, direction='X'):
+        """Flip the boxes along the corresponding axis.
+
+        Args:
+            direction (str, optional): Flip axis. Defaults to 'X'.
+        """
+        assert direction in ['X', 'Y', 'Z']
+        if direction == 'X':
+            self.tensor[:, 0] = -self.tensor[:, 0]
+            self.tensor[:, 6] = -self.tensor[:, 6] + np.pi
+            self.tensor[:, 8] = -self.tensor[:, 8]
+        elif direction == 'Y':
+            self.tensor[:, 1] = -self.tensor[:, 1]
+            self.tensor[:, 6] = -self.tensor[:, 6]
+            self.tensor[:, 7] = -self.tensor[:, 7] + np.pi
+        elif direction == 'Z':
+            self.tensor[:, 2] = -self.tensor[:, 2]
+            self.tensor[:, 7] = -self.tensor[:, 7]
+            self.tensor[:, 8] = -self.tensor[:, 8] + np.pi
+
+
+def rotation_3d_in_euler(points, angles, return_mat=False, clockwise=False):
+    """Rotate points by angles according to axis.
+
+    Args:
+        points (np.ndarray | torch.Tensor | list | tuple):
+            Points of shape (N, M, 3).
+        angles (np.ndarray | torch.Tensor | list | tuple):
+            Vector of angles of shape (N, 3).
+        return_mat (bool): Whether to return the (transposed) rotation
+            matrix. Defaults to False.
+        clockwise (bool): Whether the rotation is clockwise.
+            Defaults to False.
+
+    Raises:
+        NotImplementedError: Raised when ``clockwise=True``, which is not
+            supported yet.
+
+    Returns:
+        (torch.Tensor | np.ndarray): Rotated points of shape (N, M, 3).
+    """
+    batch_free = len(points.shape) == 2
+    if batch_free:
+        points = points[None]
+
+    if len(angles.shape) == 1:
+        angles = angles.expand(points.shape[:1] + (3, ))
+        # angles = torch.full(points.shape[:1], angles)
+
+    assert len(points.shape) == 3 and len(angles.shape) == 2 \
+        and points.shape[0] == angles.shape[0], 'Incorrect shape of points ' \
+        f'and angles: {points.shape}, {angles.shape}'
+
+    assert points.shape[-1] in [2, 3], \
+        f'Points size should be 2 or 3 instead of {points.shape[-1]}'
+
+    rot_mat_T = euler_angles_to_matrix(angles, 'ZXY')  # (N, 3, 3)
+    rot_mat_T = rot_mat_T.transpose(-2, -1)
+
+    if clockwise:
+        raise NotImplementedError('clockwise')
+
+    if points.shape[0] == 0:
+        points_new = points
+    else:
+        points_new = torch.bmm(points, rot_mat_T)
+
+    if batch_free:
+        points_new = points_new.squeeze(0)
+
+    if return_mat:
+        if batch_free:
+            rot_mat_T = rot_mat_T.squeeze(0)
+        return points_new, rot_mat_T
+    else:
+        return points_new
diff --git a/mmscan/utils/lang_utils.py b/mmscan/utils/lang_utils.py
new file mode 100644
index 0000000..840706f
--- /dev/null
+++ b/mmscan/utils/lang_utils.py
@@ -0,0 +1,240 @@
+# flake8: noqa
+# refer to LEO: embodied-generalist
+# https://github.com/embodied-generalist/embodied-generalist/blob/477dc44b8b18dbfbe6823c307436d896ec8b062e/data/data_utils.py#L322-L379
+import re
+import string
+
+
+def clean_answer(data):
+    """Clean and unify the sentence.
+
+    Args:
+        data (str): the raw sentence.
+
+    Returns:
+        data (str): the processed sentence.
+    """
+
+    data = data.lower()
+    data = re.sub('[ ]+$', '', data)
+    data = re.sub('^[ ]+', '', data)
+    data = re.sub(' {2,}', ' ', data)
+
+    data = re.sub('\.[ ]{2,}', '. ', data)
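+    # From here on, the substitutions form a fixed typo/normalization table.
+    # For instance, clean_answer('There is a  TV.') -> 'there is TV': the
+    # article and the duplicated space are dropped, the trailing period is
+    # stripped by the character filter, and 'tv' is capitalized to 'TV'.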
+    data = re.sub("[^a-zA-Z0-9,'\s\-:]+", '', data)
+    data = re.sub('ç', 'c', data)
+    data = re.sub('’', "'", data)
+    data = re.sub(r'\bletf\b', 'left', data)
+    data = re.sub(r'\blet\b', 'left', data)
+    data = re.sub(r'\btehre\b', 'there', data)
+    data = re.sub(r'\brigth\b', 'right', data)
+    data = re.sub(r'\brght\b', 'right', data)
+    data = re.sub(r'\bbehine\b', 'behind', data)
+    data = re.sub(r'\btv\b', 'TV', data)
+    data = re.sub(r'\bchai\b', 'chair', data)
+    data = re.sub(r'\bwasing\b', 'washing', data)
+    data = re.sub(r'\bwaslked\b', 'walked', data)
+    data = re.sub(r'\boclock\b', "o'clock", data)
+    data = re.sub(r"\bo\'[ ]+clock\b", "o'clock", data)
+
+    # digit to word, only for answers
+    data = re.sub(r'\b0\b', 'zero', data)
+    data = re.sub(r'\bnone\b', 'zero', data)
+    data = re.sub(r'\b1\b', 'one', data)
+    data = re.sub(r'\b2\b', 'two', data)
+    data = re.sub(r'\b3\b', 'three', data)
+    data = re.sub(r'\b4\b', 'four', data)
+    data = re.sub(r'\b5\b', 'five', data)
+    data = re.sub(r'\b6\b', 'six', data)
+    data = re.sub(r'\b7\b', 'seven', data)
+    data = re.sub(r'\b8\b', 'eight', data)
+    data = re.sub(r'\b9\b', 'nine', data)
+    data = re.sub(r'\b10\b', 'ten', data)
+    data = re.sub(r'\b11\b', 'eleven', data)
+    data = re.sub(r'\b12\b', 'twelve', data)
+    data = re.sub(r'\b13\b', 'thirteen', data)
+    data = re.sub(r'\b14\b', 'fourteen', data)
+    data = re.sub(r'\b15\b', 'fifteen', data)
+    data = re.sub(r'\b16\b', 'sixteen', data)
+    data = re.sub(r'\b17\b', 'seventeen', data)
+    data = re.sub(r'\b18\b', 'eighteen', data)
+    data = re.sub(r'\b19\b', 'nineteen', data)
+    data = re.sub(r'\b20\b', 'twenty', data)
+    data = re.sub(r'\b23\b', 'twenty-three', data)
+
+    # misc
+    # no1, mat2, etc
+    data = re.sub(r'\b([a-zA-Z]+)([0-9])\b', r'\g<1>', data)
+    data = re.sub(r'\ba\b ([a-zA-Z]+)', r'\g<1>', data)
+    data = re.sub(r'\ban\b ([a-zA-Z]+)', r'\g<1>', data)
+    data = re.sub(r'\bthe\b ([a-zA-Z]+)', r'\g<1>', data)
+    data = re.sub(r'\bbackwards\b', 'backward', data)
+
+    return data
+
+
+def normalize_answer(s):
+    """Help to 'normalize' the answer.
+
+    Args:
+        s (str): the raw answer.
+
+    Returns:
+        str : the processed sentence.
+    """
+
+    def remove_articles(text):
+        return re.sub(r'\b(a|an|the)\b', ' ', text)
+
+    def white_space_fix(text):
+        return ' '.join(text.split())
+
+    def remove_punc(text):
+        exclude = set(string.punctuation)
+        return ''.join(ch for ch in text if ch not in exclude)
+
+    def lower(text):
+        return text.lower()
+
+    return white_space_fix(remove_articles(remove_punc(lower(s))))
+
+
+def exact_match_score(prediction, ground_truth):
+    """Compute the exact match score between a prediction and the ground
+    truth after normalization.
+
+    Args:
+        prediction (str): the predicted answer.
+        ground_truth (str): the gt answer.
+
+    Returns:
+        bool : whether the normalized prediction equals the normalized
+            ground truth.
+    """
+
+    return normalize_answer(prediction) == normalize_answer(ground_truth)
+
+
+def special_token_filter(lan, clean=True, truncation=True, max_length=1024):
+    """Clean the sentence, removing special tokens and (optionally) applying
+    the LEO clean strategy.
+
+    Args:
+        lan: str, the sentence to be cleaned
+        clean: bool, whether to apply the LEO clean strategy
+        truncation: to avoid crashing pycocoevalcap, the input
+            sentence will be truncated to max_length
+        max_length: you may set this to the max length of the possible gt
+            answer
+    """
+
+    replacements = {
+        'ASSISTANT:': '',
+        'ASSISTANT: ': '',
+        '\n': '',
+        # NOTE: the token keys below are an assumption (common LLM tokenizer
+        # special tokens); adjust this set to the tokenizer actually in use.
+        '<s>': '',
+        '</s>': '',
+        '<unk>': '',
+        '<p>': '',
+        '</p>': '',
+        '<ref>': '',
+        '<|endoftext|>': '',  # for GPT2
+    }
+    for old, new in replacements.items():
+        lan = lan.replace(old, new)
+    lan = lan.strip()
+    lan = re.sub(r'\s{2,}', ' ', lan)
+    if truncation:
+        if len(lan) > max_length:
+            lan = lan[:max_length]
+    if clean:
+        lan = clean_answer(lan)
+    return lan
+
+
+def qa_prompt_define():
+    """Define the system prompt and example instance.
+
+    Returns:
+        system_prompt : str, the system prompt given to GPT
+        ex_instance : str, example instances of the input and the expected
+            output in JSON format
+    """
+
+    system_prompt = (
+        'Evaluate a model-generated QA result against a human-generated '
+        'answer for a 3D model. I will give you a dict with "Question", '
+        '"Model Answer" and "Human Answer". Please fully understand the '
+        'meaning of both answers and follow these three steps to evaluate '
+        'the model-generated answer: First step, identify all key points in '
+        'the human answer and list them; Second step, compare each of these '
+        'key points with the model-generated answer, count the number of '
+        'key points which are correct in the model-generated answer, and '
+        'count the number of key points which are missing or wrong in the '
+        'model-generated answer. Provide reasons for each evaluation. You '
+        'should not be too strict; as long as a key point has no significant '
+        'difference between the model and human answers, regard it as '
+        'correct; Third step, output the "All key points" (list), "Correct '
+        'Number" (int), "Wrong/Missing Number" (int) and "Reasons" (str) in '
+        'JSON format (obviously, "Correct Number" + "Wrong/Missing Number" '
+        'should equal the total number of key points). Here are some '
+        'examples: ')
+    ex_instance = (
+        'The input is: { "Question" : "What is the function of this '
+        'object?", "Model Answer" : "It can hang clothes for storage.", '
+        '"Human Answer" : "Providing storage space." }, the expected output '
+        'is { "All key points" : ["function of the object: providing '
+        'storage space"], "Correct Number" : 1, "Wrong/Missing Number" : 0, '
+        '"Reasons" : "A place for hanging clothes also provides storage '
+        'space." }; ')
+    ex_instance += (
+        'The input is: { "Question" : "What is the placement of this '
+        'object?", "Model Answer" : "It is placed vertically on the table", '
+        '"Human Answer" : "Its placement is standing upright on the '
+        'floor." }, the expected output is { "All key points" : ["placement '
+        'of the object: standing upright", "surface the object is standing '
+        'upright on: on the floor"], "Correct Number" : 1, "Wrong/Missing '
+        'Number" : 1, "Reasons" : "The model correctly identifies that the '
+        'object is standing but fails to identify the surface it is '
+        'standing on." }; ')
+    ex_instance += (
+        'The input is: { "Question" : "Please compare these two objects '
+        'closely, are they similar in material? Give me the answer and '
+        'reason.", "Model Answer" : "No, because the object is made of '
+        'plastic and the pillow is made of cotton.", "Human Answer" : "No, '
+        'because the bowl is wooden and the pillow is soft fabric." }, the '
+        'expected output is { "All key points" : ["Yes or No: No", "Texture '
+        'of the bowl: wooden", "Texture of the pillow: soft fabric"], '
+        '"Correct Number" : 2, "Wrong/Missing Number" : 1, "Reasons" : "The '
+        'model correctly identifies the material of the pillow (cotton is '
+        'soft fabric) but '
+        'fails to recognize the material of the bowl." }. ')
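+    # A minimal usage sketch (assuming an OpenAI-style chat API; the message
+    # layout below is illustrative, not part of this repo):
+    #   system_prompt, ex_instance = qa_prompt_define()
+    #   messages = [
+    #       {'role': 'system', 'content': system_prompt + ex_instance},
+    #       {'role': 'user', 'content': 'The input is: {"Question": ...}'},
+    #   ]
+    #   # the reply is expected to be the JSON dict described above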
+
+    return system_prompt, ex_instance
+
+
+def qa_metric_map(eval_type):
+    """Map the class type to the corresponding abbreviation.
+
+    Args:
+        eval_type (str): the class name.
+
+    Returns:
+        str : the corresponding abbreviation.
+    """
+    if 'Attribute_OO' in eval_type:
+        target = 'OOa'
+    elif 'Space_OO' in eval_type:
+        target = 'OOs'
+    elif 'EQ' in eval_type or 'Single_Attribute' in eval_type:
+        target = 'STa'
+    elif 'OR' in eval_type:
+        target = 'OR'
+    elif 'Single_Space' in eval_type:
+        target = 'STs'
+    elif 'Advanced' in eval_type:
+        target = 'Advanced'
+    else:
+        # Fail loudly on unknown types instead of hitting an
+        # ``UnboundLocalError`` at the return below.
+        raise NotImplementedError(f'Unknown eval_type: {eval_type}')
+    return target
diff --git a/mmscan/utils/task_utils.py b/mmscan/utils/task_utils.py
new file mode 100644
index 0000000..e4c5b8e
--- /dev/null
+++ b/mmscan/utils/task_utils.py
@@ -0,0 +1,41 @@
+from typing import List
+
+
+def anno_token_flatten(samples: List[dict], keep_only_one: bool = True):
+    """Flatten the annotation tokens for each target in a 3D visual
+    grounding sample.
+
+    Args:
+        samples (list[dict]): The original VG samples.
+        keep_only_one (bool):
+            Whether to keep only one positive token for each target.
+            Defaults to True.
+
+    Returns:
+        List[dict] : The token-flattened samples.
+    """
+
+    marked_indices = []
+    for i, d in enumerate(samples):
+        target_ids = d['target_id']
+        ret_target_ids = []
+        ret_target = []
+        ret_tps = []
+        # use ``j`` here: reusing ``i`` would shadow the sample index
+        # recorded in ``marked_indices`` below
+        for j, target_id in enumerate(target_ids):
+            tps = d['tokens_positive'].get(str(target_id), [])
+            for tp in tps:
+                ret_target_ids.append(target_id)
+                ret_target.append(d['target'][j])
+                ret_tps.append(tp)
+                if keep_only_one:
+                    break
+        d['target_id'] = ret_target_ids
+        d['target'] = ret_target
+        d['tokens_positive'] = ret_tps
+        if len(d['target_id']) == 0:
+            marked_indices.append(i)
+
+    for i in marked_indices[::-1]:
+        del samples[i]
+
+    return samples
diff --git a/LICENSE b/models/EmbodiedScan/LICENSE
similarity index 100%
rename from LICENSE
rename to models/EmbodiedScan/LICENSE
diff --git a/configs/default_runtime.py b/models/EmbodiedScan/configs/default_runtime.py
similarity index 100%
rename from configs/default_runtime.py
rename to models/EmbodiedScan/configs/default_runtime.py
diff --git a/configs/detection/cont-det3d_8xb1_embodiedscan-3d-284class-9dof.py b/models/EmbodiedScan/configs/detection/cont-det3d_8xb1_embodiedscan-3d-284class-9dof.py
similarity index 100%
rename from configs/detection/cont-det3d_8xb1_embodiedscan-3d-284class-9dof.py
rename to models/EmbodiedScan/configs/detection/cont-det3d_8xb1_embodiedscan-3d-284class-9dof.py
diff --git a/configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py b/models/EmbodiedScan/configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py
similarity index 100%
rename from configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py
rename to models/EmbodiedScan/configs/detection/mv-det3d_8xb4_embodiedscan-3d-284class-9dof.py
diff --git a/models/EmbodiedScan/configs/grounding/mv-grounding_1xb1_embodiedscan-vg-9dof.py b/models/EmbodiedScan/configs/grounding/mv-grounding_1xb1_embodiedscan-vg-9dof.py
new file mode 100644
index 0000000..4bcb7ab
--- /dev/null
+++ b/models/EmbodiedScan/configs/grounding/mv-grounding_1xb1_embodiedscan-vg-9dof.py
@@ -0,0 +1,216 @@
+_base_ = ['../default_runtime.py']
+n_points = 100000
+
+backend_args = None
+# Uncomment the following if using Ceph or other file clients.
+# See https://mmcv.readthedocs.io/en/latest/api.html#mmcv.fileio.FileClient
+# for more details.
+# file_client_args = dict( +# backend='petrel', +# path_mapping=dict({ +# './data/scannet/': +# 's3://openmmlab/datasets/detection3d/scannet_processed/', +# 'data/scannet/': +# 's3://openmmlab/datasets/detection3d/scannet_processed/' +# })) + +metainfo = dict(classes='all') + +model = dict( + type='SparseFeatureFusion3DGrounder', + num_queries=256, + voxel_size=0.01, + data_preprocessor=dict(type='Det3DDataPreprocessor', + mean=[123.675, 116.28, 103.53], + std=[58.395, 57.12, 57.375], + bgr_to_rgb=True, + pad_size_divisor=32), + backbone=dict( + type='mmdet.ResNet', + depth=50, + base_channels=16, # to make it consistent with mink resnet + num_stages=4, + out_indices=(0, 1, 2, 3), + frozen_stages=1, + norm_cfg=dict(type='BN', requires_grad=False), + norm_eval=True, + init_cfg=dict(type='Pretrained', checkpoint='torchvision://resnet50'), + style='pytorch'), + backbone_3d=dict(type='MinkResNet', in_channels=3, depth=34), + use_xyz_feat=True, + # change due to no img feature fusion + neck_3d=dict(type='MinkNeck', + num_classes=1, + in_channels=[128, 256, 512, 1024], + out_channels=256, + voxel_size=0.01, + pts_prune_threshold=1000), + decoder=dict( + num_layers=6, + return_intermediate=True, + layer_cfg=dict( + # query self attention layer + self_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to text + cross_attn_text_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + # cross attention layer query to image + cross_attn_cfg=dict(embed_dims=256, num_heads=8, dropout=0.0), + ffn_cfg=dict(embed_dims=256, + feedforward_channels=2048, + ffn_drop=0.0)), + post_norm_cfg=None), + bbox_head=dict(type='GroundingHead', + num_classes=256, + sync_cls_avg_factor=True, + decouple_bbox_loss=True, + decouple_groups=4, + share_pred_layer=True, + decouple_weights=[0.2, 0.2, 0.2, 0.4], + contrastive_cfg=dict(max_text_len=256, + log_scale='auto', + bias=True), + loss_cls=dict(type='mmdet.FocalLoss', + use_sigmoid=True, + gamma=2.0, + alpha=0.25, + loss_weight=1.0), + loss_bbox=dict(type='BBoxCDLoss', + mode='l1', + loss_weight=1.0, + group='g8')), + coord_type='DEPTH', + # training and testing settings + train_cfg=dict(assigner=dict(type='HungarianAssigner3D', + match_costs=[ + dict(type='BinaryFocalLossCost', + weight=1.0), + dict(type='BBox3DL1Cost', weight=2.0), + dict(type='IoU3DCost', weight=2.0) + ]), ), + test_cfg=None) + +dataset_type = 'MultiView3DGroundingDataset' +data_root = 'data' + +train_pipeline = [ + dict(type='LoadAnnotations3D'), + dict(type='MultiViewPipeline', + n_images=20, + transforms=[ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadDepthFromFile', backend_args=backend_args), + dict(type='ConvertRGBDToPoints', coord_type='CAMERA'), + dict(type='PointSample', num_points=n_points // 10), + dict(type='Resize', scale=(480, 480), keep_ratio=False) + ]), + dict(type='AggregateMultiViewPoints', coord_type='DEPTH'), + dict(type='PointSample', num_points=n_points), + dict(type='GlobalRotScaleTrans', + rot_range=[-0.087266, 0.087266], + scale_ratio_range=[.9, 1.1], + translation_std=[.1, .1, .1], + shift_height=False), + dict(type='Pack3DDetInputs', + keys=['img', 'points', 'gt_bboxes_3d', 'gt_labels_3d']) +] +test_pipeline = [ + dict(type='LoadAnnotations3D'), + dict(type='MultiViewPipeline', + n_images=50, + ordered=True, + transforms=[ + dict(type='LoadImageFromFile', backend_args=backend_args), + dict(type='LoadDepthFromFile', backend_args=backend_args), + dict(type='ConvertRGBDToPoints', coord_type='CAMERA'), + 
dict(type='PointSample', num_points=n_points // 10), + dict(type='Resize', scale=(480, 480), keep_ratio=False) + ]), + dict(type='AggregateMultiViewPoints', coord_type='DEPTH'), + dict(type='PointSample', num_points=n_points), + dict(type='Pack3DDetInputs', + keys=['img', 'points', 'gt_bboxes_3d', 'gt_labels_3d']) +] + +# TODO: to determine a reasonable batch size +train_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + sampler=dict(type='DefaultSampler', shuffle=True), + dataset=dict( + type='RepeatDataset', + times=1, + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='embodiedscan_infos_train.pkl', + vg_file= + 'es_gen_text/vg_full/VG_train_20Percent_flattened_token_positive.json', + metainfo=metainfo, + pipeline=train_pipeline, + test_mode=False, + filter_empty_gt=True, + box_type_3d='Euler-Depth'))) + +val_dataloader = dict( + batch_size=1, + num_workers=1, + persistent_workers=True, + drop_last=False, + sampler=dict(type='DefaultSampler', shuffle=False), + dataset=dict( + type=dataset_type, + data_root=data_root, + ann_file='embodiedscan_infos_val.pkl', + vg_file= + 'es_gen_text/vg_full/VG_val_5Percent_flattened_token_positive.json', + metainfo=metainfo, + pipeline=test_pipeline, + test_mode=True, + filter_empty_gt=True, + box_type_3d='Euler-Depth')) +test_dataloader = val_dataloader + +val_evaluator = dict(type='GroundingMetric') +test_evaluator = val_evaluator + +# training schedule for 1x +train_cfg = dict(type='EpochBasedTrainLoop', max_epochs=12, val_interval=3) +val_cfg = dict(type='ValLoop') +test_cfg = dict(type='TestLoop') + +# optimizer +lr = 5e-4 +optim_wrapper = dict(type='OptimWrapper', + optimizer=dict(type='AdamW', lr=lr, weight_decay=0.0005), + paramwise_cfg=dict( + custom_keys={ + 'text_encoder': dict(lr_mult=0.0), + 'decoder': dict(lr_mult=0.1, decay_mult=1.0) + }), + clip_grad=dict(max_norm=10, norm_type=2)) + +# learning rate +param_scheduler = dict(type='MultiStepLR', + begin=0, + end=12, + by_epoch=True, + milestones=[8, 11], + gamma=0.1) + +custom_hooks = [dict(type='EmptyCacheHook', after_iter=True)] + +# hooks +default_hooks = dict( + checkpoint=dict(type='CheckpointHook', interval=1, max_keep_ckpts=3)) + +# vis_backends = [ +# dict(type='TensorboardVisBackend'), +# dict(type='LocalVisBackend') +# ] +# visualizer = dict( +# type='Det3DLocalVisualizer', +# vis_backends=vis_backends, name='visualizer') + +find_unused_parameters = True +load_from = '/mnt/petrelfs/lvruiyuan/repos/EmbodiedScan/work_dirs/mv-3ddet/mv-grounding.pth' # noqa diff --git a/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof-full.py b/models/EmbodiedScan/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof-full.py similarity index 100% rename from configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof-full.py rename to models/EmbodiedScan/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof-full.py diff --git a/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py b/models/EmbodiedScan/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py similarity index 100% rename from configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py rename to models/EmbodiedScan/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py diff --git a/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_complex-all.py b/models/EmbodiedScan/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_complex-all.py similarity index 100% rename from configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_complex-all.py 
rename to models/EmbodiedScan/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_complex-all.py diff --git a/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_fcaf-coder.py b/models/EmbodiedScan/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_fcaf-coder.py similarity index 100% rename from configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_fcaf-coder.py rename to models/EmbodiedScan/configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof_fcaf-coder.py diff --git a/models/EmbodiedScan/configs/grounding/pcd_4xb24_mmscan_vg_num100.py b/models/EmbodiedScan/configs/grounding/pcd_4xb24_mmscan_vg_num100.py new file mode 100644 index 0000000..ecac007 --- /dev/null +++ b/models/EmbodiedScan/configs/grounding/pcd_4xb24_mmscan_vg_num100.py @@ -0,0 +1,254 @@ +# edit it +load_from = '/path/to/mv-3ddet.pth' +backend_args = None +custom_hooks = [ + dict(after_iter=True, type='EmptyCacheHook'), +] +data_root = 'data' +dataset_type = 'PointCloud3DGroundingDataset' +default_hooks = dict(checkpoint=dict(interval=1, + max_keep_ckpts=3, + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict(type='ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook')) +default_scope = 'embodiedscan' +env_cfg = dict(cudnn_benchmark=False, + dist_cfg=dict(backend='nccl', port=22873), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +find_unused_parameters = True +launcher = 'slurm' + +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +lr = 0.0005 +metainfo = dict(classes='all') +model = dict(backbone_3d=dict(depth=34, in_channels=6, type='MinkResNet'), + bbox_head=dict(contrastive_cfg=dict(bias=True, + log_scale='auto', + max_text_len=256), + decouple_bbox_loss=True, + decouple_groups=4, + decouple_weights=[ + 0.2, + 0.2, + 0.2, + 0.4, + ], + loss_bbox=dict(group='g8', + loss_weight=1.0, + mode='l1', + type='BBoxCDLoss'), + loss_cls=dict(alpha=0.25, + gamma=2.0, + loss_weight=1.0, + type='mmdet.FocalLoss', + use_sigmoid=True), + num_classes=256, + share_pred_layer=True, + sync_cls_avg_factor=True, + type='GroundingHead'), + coord_type='DEPTH', + data_preprocessor=dict(bgr_to_rgb=True, + mean=[ + 123.675, + 116.28, + 103.53, + ], + pad_size_divisor=32, + std=[ + 58.395, + 57.12, + 57.375, + ], + type='Det3DDataPreprocessor'), + decoder=dict(layer_cfg=dict( + cross_attn_cfg=dict(dropout=0.0, embed_dims=256, num_heads=8), + cross_attn_text_cfg=dict(dropout=0.0, + embed_dims=256, + num_heads=8), + ffn_cfg=dict(embed_dims=256, + feedforward_channels=2048, + ffn_drop=0.0), + self_attn_cfg=dict(dropout=0.0, embed_dims=256, num_heads=8)), + num_layers=6, + post_norm_cfg=None, + return_intermediate=True), + neck_3d=dict(in_channels=[ + 64, + 128, + 256, + 512, + ], + num_classes=1, + out_channels=256, + pts_prune_threshold=1000, + type='MinkNeck', + voxel_size=0.01), + num_queries=100, + test_cfg=None, + train_cfg=dict(assigner=dict(match_costs=[ + dict(type='BinaryFocalLossCost', weight=1.0), + dict(type='BBox3DL1Cost', weight=2.0), + dict(type='IoU3DCost', weight=2.0), + ], + type='HungarianAssigner3D')), + type='SparseFeatureFusion3DGrounderMod', + use_xyz_feat=True, + voxel_size=0.01) +n_points = 100000 +optim_wrapper = dict( + clip_grad=dict(max_norm=10, norm_type=2), + optimizer=dict(lr=0.0005, type='AdamW', weight_decay=0.0005), + paramwise_cfg=dict( + custom_keys=dict(decoder=dict(decay_mult=1.0, lr_mult=0.1), + text_encoder=dict(lr_mult=0.0))), + 
type='OptimWrapper') +param_scheduler = dict(begin=0, + by_epoch=True, + end=12, + gamma=0.1, + milestones=[ + 8, + 11, + ], + type='MultiStepLR') +resume = False +test_cfg = dict(type='TestLoop') +test_dataloader = dict(batch_size=24, + dataset=dict(ann_file='embodiedscan_infos_val.pkl', + box_type_3d='Euler-Depth', + data_root='data', + filter_empty_gt=True, + metainfo=dict(classes='all'), + pipeline=[ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, + type='PointSample'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), + ], + test_mode=True, + tokens_positive_rebuild=True, + type='MMScanPointCloud3DGroundingDataset', + vg_file=''), + drop_last=False, + num_workers=12, + persistent_workers=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict(type='GroundingMetricMod') +test_pipeline = [ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, type='PointSample'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), +] +train_cfg = dict(max_epochs=12, type='EpochBasedTrainLoop', val_interval=3) +train_dataloader = dict(batch_size=24, + dataset=dict(dataset=dict( + ann_file='embodiedscan_infos_train.pkl', + box_type_3d='Euler-Depth', + data_root='data', + filter_empty_gt=True, + metainfo=dict(classes='all'), + pipeline=[ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, type='PointSample'), + dict(rot_range=[ + -0.087266, + 0.087266, + ], + scale_ratio_range=[ + 0.9, + 1.1, + ], + shift_height=False, + translation_std=[ + 0.1, + 0.1, + 0.1, + ], + type='GlobalRotScaleTrans'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), + ], + test_mode=False, + tokens_positive_rebuild=True, + type='MMScanPointCloud3DGroundingDataset', + vg_file=''), + times=1, + type='RepeatDataset'), + num_workers=12, + persistent_workers=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_pipeline = [ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, type='PointSample'), + dict(rot_range=[ + -0.087266, + 0.087266, + ], + scale_ratio_range=[ + 0.9, + 1.1, + ], + shift_height=False, + translation_std=[ + 0.1, + 0.1, + 0.1, + ], + type='GlobalRotScaleTrans'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), +] +val_cfg = dict(type='ValLoop') +val_dataloader = dict(batch_size=24, + dataset=dict(ann_file='embodiedscan_infos_val.pkl', + box_type_3d='Euler-Depth', + data_root='data', + filter_empty_gt=True, + metainfo=dict(classes='all'), + pipeline=[ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, + type='PointSample'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), + ], + test_mode=True, + tokens_positive_rebuild=True, + type='MMScanPointCloud3DGroundingDataset', + vg_file=''), + drop_last=False, + num_workers=12, + persistent_workers=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict(type='GroundingMetricMod') +work_dir = '/mnt/petrelfs/lvruiyuan/repos/EmbodiedScan/work_dirs/pcd-mmscan-grounding-20Per-100queries-load' diff --git a/models/EmbodiedScan/configs/grounding/pcd_4xb24_mmscan_vg_num256.py b/models/EmbodiedScan/configs/grounding/pcd_4xb24_mmscan_vg_num256.py new file mode 100644 index 0000000..ced114a --- /dev/null 
+++ b/models/EmbodiedScan/configs/grounding/pcd_4xb24_mmscan_vg_num256.py @@ -0,0 +1,254 @@ +# edit it +load_from = '/path/to/mv-3ddet.pth' +backend_args = None +custom_hooks = [ + dict(after_iter=True, type='EmptyCacheHook'), +] +data_root = 'data' +dataset_type = 'PointCloud3DGroundingDataset' +default_hooks = dict(checkpoint=dict(interval=1, + max_keep_ckpts=3, + type='CheckpointHook'), + logger=dict(interval=50, type='LoggerHook'), + param_scheduler=dict(type='ParamSchedulerHook'), + sampler_seed=dict(type='DistSamplerSeedHook'), + timer=dict(type='IterTimerHook')) +default_scope = 'embodiedscan' +env_cfg = dict(cudnn_benchmark=False, + dist_cfg=dict(backend='nccl', port=22873), + mp_cfg=dict(mp_start_method='fork', opencv_num_threads=0)) +find_unused_parameters = True +launcher = 'slurm' + +log_level = 'INFO' +log_processor = dict(by_epoch=True, type='LogProcessor', window_size=50) +lr = 0.0005 +metainfo = dict(classes='all') +model = dict(backbone_3d=dict(depth=34, in_channels=6, type='MinkResNet'), + bbox_head=dict(contrastive_cfg=dict(bias=True, + log_scale='auto', + max_text_len=256), + decouple_bbox_loss=True, + decouple_groups=4, + decouple_weights=[ + 0.2, + 0.2, + 0.2, + 0.4, + ], + loss_bbox=dict(group='g8', + loss_weight=1.0, + mode='l1', + type='BBoxCDLoss'), + loss_cls=dict(alpha=0.25, + gamma=2.0, + loss_weight=1.0, + type='mmdet.FocalLoss', + use_sigmoid=True), + num_classes=256, + share_pred_layer=True, + sync_cls_avg_factor=True, + type='GroundingHead'), + coord_type='DEPTH', + data_preprocessor=dict(bgr_to_rgb=True, + mean=[ + 123.675, + 116.28, + 103.53, + ], + pad_size_divisor=32, + std=[ + 58.395, + 57.12, + 57.375, + ], + type='Det3DDataPreprocessor'), + decoder=dict(layer_cfg=dict( + cross_attn_cfg=dict(dropout=0.0, embed_dims=256, num_heads=8), + cross_attn_text_cfg=dict(dropout=0.0, + embed_dims=256, + num_heads=8), + ffn_cfg=dict(embed_dims=256, + feedforward_channels=2048, + ffn_drop=0.0), + self_attn_cfg=dict(dropout=0.0, embed_dims=256, num_heads=8)), + num_layers=6, + post_norm_cfg=None, + return_intermediate=True), + neck_3d=dict(in_channels=[ + 64, + 128, + 256, + 512, + ], + num_classes=1, + out_channels=256, + pts_prune_threshold=1000, + type='MinkNeck', + voxel_size=0.01), + num_queries=256, + test_cfg=None, + train_cfg=dict(assigner=dict(match_costs=[ + dict(type='BinaryFocalLossCost', weight=1.0), + dict(type='BBox3DL1Cost', weight=2.0), + dict(type='IoU3DCost', weight=2.0), + ], + type='HungarianAssigner3D')), + type='SparseFeatureFusion3DGrounderMod', + use_xyz_feat=True, + voxel_size=0.01) +n_points = 100000 +optim_wrapper = dict( + clip_grad=dict(max_norm=10, norm_type=2), + optimizer=dict(lr=0.0005, type='AdamW', weight_decay=0.0005), + paramwise_cfg=dict( + custom_keys=dict(decoder=dict(decay_mult=1.0, lr_mult=0.1), + text_encoder=dict(lr_mult=0.0))), + type='OptimWrapper') +param_scheduler = dict(begin=0, + by_epoch=True, + end=12, + gamma=0.1, + milestones=[ + 8, + 11, + ], + type='MultiStepLR') +resume = False +test_cfg = dict(type='TestLoop') +test_dataloader = dict(batch_size=24, + dataset=dict(ann_file='embodiedscan_infos_val.pkl', + box_type_3d='Euler-Depth', + data_root='data', + filter_empty_gt=True, + metainfo=dict(classes='all'), + pipeline=[ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, + type='PointSample'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), + ], + test_mode=True, + tokens_positive_rebuild=True, + 
type='MMScanPointCloud3DGroundingDataset', + vg_file=''), + drop_last=False, + num_workers=12, + persistent_workers=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +test_evaluator = dict(type='GroundingMetricMod') +test_pipeline = [ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, type='PointSample'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), +] +train_cfg = dict(max_epochs=12, type='EpochBasedTrainLoop', val_interval=3) +train_dataloader = dict(batch_size=24, + dataset=dict(dataset=dict( + ann_file='embodiedscan_infos_train.pkl', + box_type_3d='Euler-Depth', + data_root='data', + filter_empty_gt=True, + metainfo=dict(classes='all'), + pipeline=[ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, type='PointSample'), + dict(rot_range=[ + -0.087266, + 0.087266, + ], + scale_ratio_range=[ + 0.9, + 1.1, + ], + shift_height=False, + translation_std=[ + 0.1, + 0.1, + 0.1, + ], + type='GlobalRotScaleTrans'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), + ], + test_mode=False, + tokens_positive_rebuild=True, + type='MMScanPointCloud3DGroundingDataset', + vg_file=''), + times=1, + type='RepeatDataset'), + num_workers=12, + persistent_workers=True, + sampler=dict(shuffle=True, type='DefaultSampler')) +train_pipeline = [ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, type='PointSample'), + dict(rot_range=[ + -0.087266, + 0.087266, + ], + scale_ratio_range=[ + 0.9, + 1.1, + ], + shift_height=False, + translation_std=[ + 0.1, + 0.1, + 0.1, + ], + type='GlobalRotScaleTrans'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), +] +val_cfg = dict(type='ValLoop') +val_dataloader = dict(batch_size=24, + dataset=dict(ann_file='embodiedscan_infos_val.pkl', + box_type_3d='Euler-Depth', + data_root='data', + filter_empty_gt=True, + metainfo=dict(classes='all'), + pipeline=[ + dict(type='LoadAnnotations3D'), + dict(type='DefaultPipeline'), + dict(num_points=100000, + type='PointSample'), + dict(keys=[ + 'points', + 'gt_bboxes_3d', + 'gt_labels_3d', + ], + type='Pack3DDetInputs'), + ], + test_mode=True, + tokens_positive_rebuild=True, + type='MMScanPointCloud3DGroundingDataset', + vg_file=''), + drop_last=False, + num_workers=12, + persistent_workers=True, + sampler=dict(shuffle=False, type='DefaultSampler')) +val_evaluator = dict(type='GroundingMetricMod') +work_dir = '/mnt/petrelfs/lvruiyuan/repos/EmbodiedScan/work_dirs/pcd-mmscan-grounding-20Per-100queries-load' diff --git a/configs/occupancy/cont-occ_8xb1_embodiedscan-occ-80class.py b/models/EmbodiedScan/configs/occupancy/cont-occ_8xb1_embodiedscan-occ-80class.py similarity index 100% rename from configs/occupancy/cont-occ_8xb1_embodiedscan-occ-80class.py rename to models/EmbodiedScan/configs/occupancy/cont-occ_8xb1_embodiedscan-occ-80class.py diff --git a/configs/occupancy/mv-occ_8xb1_embodiedscan-occ-80class.py b/models/EmbodiedScan/configs/occupancy/mv-occ_8xb1_embodiedscan-occ-80class.py similarity index 100% rename from configs/occupancy/mv-occ_8xb1_embodiedscan-occ-80class.py rename to models/EmbodiedScan/configs/occupancy/mv-occ_8xb1_embodiedscan-occ-80class.py diff --git a/demo/demo.ipynb b/models/EmbodiedScan/demo/demo.ipynb similarity index 99% rename from demo/demo.ipynb rename to models/EmbodiedScan/demo/demo.ipynb index 138e2e7..6e001a1 100644 --- a/demo/demo.ipynb 
+++ b/models/EmbodiedScan/demo/demo.ipynb @@ -89,7 +89,7 @@ " config.merge_from_dict(cfg_options)\n", "\n", " config.model.train_cfg = None\n", - " init_default_scope(config.get('default_scope', 'mmdet3d'))\n", + " init_default_scope(config.get('default_scope', 'embodiedscan'))\n", " model = MODELS.build(config.model)\n", "\n", " if checkpoint is not None:\n", @@ -122,15 +122,10 @@ " model.eval()\n", " return model\n", "\n", - "config_path = '../configs/detection/cont-det3d_8xb1_embodiedscan-3d-284class-9dof.py'\n", + "config_path = '../config/detection/embodied-det3d_8xb1_embodiedscan-3d-284class-9dof-mlvl.py'\n", "checkpoint_path = '../ckpt/continuous.pth'\n", "device = 'cuda:0'\n", "\n", - "# Occupancy Settings\n", - "# config_path = '../configs/occupancy/cont-occ_8xb1_embodiedscan-occ-80class.py'\n", - "# checkpoint_path = '../ckpt/continuous_occupancy.pth'\n", - "# device = 'cuda:0'\n", - "\n", "model = init_model(config_path, checkpoint_path, device=device)\n", "cfg = model.cfg\n", "classes = list(cfg.metainfo.classes)" @@ -180,10 +175,7 @@ " ann_info=dict( # empty annotation\n", " gt_bboxes_3d=np.zeros((0, 9), dtype=np.float32),\n", " gt_labels_3d=np.zeros((0, ), dtype=np.int64),\n", - " visible_instance_masks=[[] for i in range(len(poses))],\n", - " gt_occupancy=np.zeros((0,4), dtype=np.int64),\n", - " visible_occupancy_masks=[[] for i in range(len(poses))]\n", - " ))\n", + " visible_instance_masks=[[] for i in range(len(poses))]))\n", "n_frames = len(poses)\n", "data = []\n", "for i in range(1, n_frames):\n", @@ -299,19 +291,10 @@ "\n", " return boxes_tensor[selected_idx], label[selected_idx]\n", "\n", - "is_occupancy = ('pred_occupancy' in results[0])\n", - "if is_occupancy:\n", - " classes = ['empty'] + classes # 0 = empty for occupancy\n", - "\n", "filtered_results = []\n", - "if not is_occupancy:\n", - " for i in range(len(results)):\n", - " boxes, labels = nms_filter(results[i].pred_instances_3d)\n", - " filtered_results.append((boxes, labels))\n", - "else:\n", - " for i in range(len(results)):\n", - " pred_occ = results[i].pred_occupancy.cpu().numpy()\n", - " filtered_results.append(pred_occ)\n", + "for i in range(len(results)):\n", + " boxes, labels = nms_filter(results[i].pred_instances_3d)\n", + " filtered_results.append((boxes, labels))\n", "\n", "selected_image = [\n", " info['img_path'].index(img_path)\n", @@ -326,15 +309,12 @@ "for i in range(len(results)):\n", " image_ann = info['images'][selected_image[i]]\n", " image_ann['visible_instance_ids'] = []\n", - " if is_occupancy:\n", - " image_ann['pred_occupancy'] = filtered_results[i]\n", - " else:\n", - " boxes, labels = filtered_results[i]\n", - " for j in range(boxes.shape[0]):\n", - " pseudo_ann['instances'].append(\n", - " dict(bbox_3d=boxes[j], bbox_label_3d=labels[j]))\n", - " instance_id = len(pseudo_ann['instances']) - 1\n", - " image_ann['visible_instance_ids'].append(instance_id)\n", + " boxes, labels = filtered_results[i]\n", + " for j in range(boxes.shape[0]):\n", + " pseudo_ann['instances'].append(\n", + " dict(bbox_3d=boxes[j], bbox_label_3d=labels[j]))\n", + " instance_id = len(pseudo_ann['instances']) - 1\n", + " image_ann['visible_instance_ids'].append(instance_id)\n", " pseudo_ann['images'].append(image_ann)\n", "\n", "metainfo = {'categories': classes}\n", @@ -371,10 +351,7 @@ "from embodiedscan.explorer import EmbodiedScanExplorer\n", "visualizer = EmbodiedScanExplorer(data_root={'demo': root_dir},\n", " ann_file=[packed_pseudo_ann])\n", - "if not is_occupancy:\n", - " 
visualizer.render_continuous_scene(f'demo/{scene_name}')\n", - "else:\n", - " visualizer.render_continuous_occupancy_prediction(f'demo/{scene_name}')" + "visualizer.render_continuous_scene(f'demo/{scene_name}')" ] }, { @@ -884,12 +861,11 @@ } ], "source": [ - "if not is_occupancy:\n", - " for i in range(len(results)):\n", - " cam_name = pseudo_ann['images'][i]['img_path'].split('/')[-1][:-4]\n", - " visualizer.show_image(f'demo/{scene_name}',\n", - " camera_name=cam_name,\n", - " render_box=True)" + "for i in range(len(results)):\n", + " cam_name = pseudo_ann['images'][i]['img_path'].split('/')[-1][:-4]\n", + " visualizer.show_image(f'demo/{scene_name}',\n", + " camera_name=cam_name,\n", + " render_box=True)" ] } ], diff --git a/demo/demo.py b/models/EmbodiedScan/demo/demo.py similarity index 85% rename from demo/demo.py rename to models/EmbodiedScan/demo/demo.py index d59be7a..b9c469a 100644 --- a/demo/demo.py +++ b/models/EmbodiedScan/demo/demo.py @@ -8,16 +8,15 @@ import numpy as np import torch +from embodiedscan.explorer import EmbodiedScanExplorer +from embodiedscan.registry import DATASETS, MODELS +from embodiedscan.structures import get_box_type from mmengine.config import Config from mmengine.dataset import Compose, pseudo_collate from mmengine.registry import init_default_scope from mmengine.runner import load_checkpoint from scipy.spatial.transform import Rotation as R -from embodiedscan.explorer import EmbodiedScanExplorer -from embodiedscan.registry import DATASETS, MODELS -from embodiedscan.structures import get_box_type - def init_model(config: Union[str, Path, Config], checkpoint: Optional[str] = None, @@ -47,7 +46,7 @@ def init_model(config: Union[str, Path, Config], config.merge_from_dict(cfg_options) config.model.train_cfg = None - init_default_scope(config.get('default_scope', 'mmdet3d')) + init_default_scope(config.get('default_scope', 'embodiedscan')) model = MODELS.build(config.model) if checkpoint is not None: @@ -166,9 +165,7 @@ def main(args): ann_info=dict( # empty annotation gt_bboxes_3d=np.zeros((0, 9), dtype=np.float32), gt_labels_3d=np.zeros((0, ), dtype=np.int64), - visible_instance_masks=[[] for i in range(len(poses))], - gt_occupancy=np.zeros((0, 4), dtype=np.int64), - visible_occupancy_masks=[[] for i in range(len(poses))])) + visible_instance_masks=[[] for i in range(len(poses))])) n_frames = len(poses) data = [] for i in range(1, n_frames): @@ -210,19 +207,10 @@ def main(args): torch.cuda.empty_cache() # collect results and construct data for visualization - is_occupancy = ('pred_occupancy' in results[0]) - if is_occupancy: - classes = ['empty'] + classes # 0 = empty for occupancy - filtered_results = [] - if not is_occupancy: - for i in range(len(results)): - boxes, labels = nms_filter(results[i].pred_instances_3d) - filtered_results.append((boxes, labels)) - else: - for i in range(len(results)): - pred_occ = results[i].pred_occupancy.cpu().numpy() - filtered_results.append(pred_occ) + for i in range(len(results)): + boxes, labels = nms_filter(results[i].pred_instances_3d) + filtered_results.append((boxes, labels)) selected_image = [ info['img_path'].index(img_path) @@ -237,15 +225,12 @@ def main(args): for i in range(len(results)): image_ann = info['images'][selected_image[i]] image_ann['visible_instance_ids'] = [] - if is_occupancy: - image_ann['pred_occupancy'] = filtered_results[i] - else: - boxes, labels = filtered_results[i] - for j in range(boxes.shape[0]): - pseudo_ann['instances'].append( - dict(bbox_3d=boxes[j], bbox_label_3d=labels[j])) - 
instance_id = len(pseudo_ann['instances']) - 1 - image_ann['visible_instance_ids'].append(instance_id) + boxes, labels = filtered_results[i] + for j in range(boxes.shape[0]): + pseudo_ann['instances'].append( + dict(bbox_3d=boxes[j], bbox_label_3d=labels[j])) + instance_id = len(pseudo_ann['instances']) - 1 + image_ann['visible_instance_ids'].append(instance_id) pseudo_ann['images'].append(image_ann) metainfo = {'categories': classes} @@ -254,15 +239,12 @@ def main(args): # visualization visualizer = EmbodiedScanExplorer(data_root={'demo': args.root_dir}, ann_file=[packed_pseudo_ann]) - if not is_occupancy: - visualizer.render_continuous_scene(f'demo/{args.scene}') - for i in range(len(results)): - cam_name = pseudo_ann['images'][i]['img_path'].split('/')[-1][:-4] - visualizer.show_image(f'demo/{args.scene}', - camera_name=cam_name, - render_box=True) - else: - visualizer.render_continuous_occupancy_prediction(f'demo/{args.scene}') + visualizer.render_continuous_scene(f'demo/{args.scene}') + for i in range(len(results)): + cam_name = pseudo_ann['images'][i]['img_path'].split('/')[-1][:-4] + visualizer.show_image(f'demo/{args.scene}', + camera_name=cam_name, + render_box=True) if __name__ == '__main__': diff --git a/embodiedscan/converter/extract_occupancy_ann.py b/models/EmbodiedScan/embodiedscan/converter/extract_occupancy_ann.py similarity index 100% rename from embodiedscan/converter/extract_occupancy_ann.py rename to models/EmbodiedScan/embodiedscan/converter/extract_occupancy_ann.py diff --git a/embodiedscan/converter/generate_image_3rscan.py b/models/EmbodiedScan/embodiedscan/converter/generate_image_3rscan.py similarity index 100% rename from embodiedscan/converter/generate_image_3rscan.py rename to models/EmbodiedScan/embodiedscan/converter/generate_image_3rscan.py diff --git a/embodiedscan/converter/generate_image_scannet.py b/models/EmbodiedScan/embodiedscan/converter/generate_image_scannet.py similarity index 100% rename from embodiedscan/converter/generate_image_scannet.py rename to models/EmbodiedScan/embodiedscan/converter/generate_image_scannet.py diff --git a/models/EmbodiedScan/embodiedscan/datasets/__init__.py b/models/EmbodiedScan/embodiedscan/datasets/__init__.py new file mode 100644 index 0000000..0a0de9a --- /dev/null +++ b/models/EmbodiedScan/embodiedscan/datasets/__init__.py @@ -0,0 +1,12 @@ +from .embodiedscan_dataset import EmbodiedScanDataset +from .mmscan_dataset import MMScanPointCloud3DGroundingDataset +from .mv_3dvg_dataset import MultiView3DGroundingDataset +from .pcd_3dvg_dataset import PointCloud3DGroundingDataset +from .pcd_3dvg_dataset_demo import PointCloud3DGroundingDatasetDemo +from .transforms import * # noqa: F401,F403 + +__all__ = [ + 'EmbodiedScanDataset', 'MultiView3DGroundingDataset', + 'PointCloud3DGroundingDataset', 'PointCloud3DGroundingDatasetDemo', + 'MMScanPointCloud3DGroundingDataset' +] diff --git a/embodiedscan/datasets/embodiedscan_dataset.py b/models/EmbodiedScan/embodiedscan/datasets/embodiedscan_dataset.py similarity index 94% rename from embodiedscan/datasets/embodiedscan_dataset.py rename to models/EmbodiedScan/embodiedscan/datasets/embodiedscan_dataset.py index 67ab867..f928ddb 100644 --- a/embodiedscan/datasets/embodiedscan_dataset.py +++ b/models/EmbodiedScan/embodiedscan/datasets/embodiedscan_dataset.py @@ -4,11 +4,10 @@ import mmengine import numpy as np -from mmengine.dataset import BaseDataset -from mmengine.fileio import load - from embodiedscan.registry import DATASETS from embodiedscan.structures import get_box_type 
+from mmengine.dataset import BaseDataset +from mmengine.fileio import load @DATASETS.register_module() @@ -223,33 +222,22 @@ def parse_ann_info(self, info: dict) -> dict: mask_filename = os.path.join(self.data_prefix.get('img_path', ''), ann_dataset, building, 'occupancy', f'visible_occupancy_{region}.pkl') - elif ann_dataset == 'arkitscenes': - occ_filename = None - mask_filename = None else: raise NotImplementedError - if occ_filename is None: - gt_occ = np.zeros((0, 4), dtype=np.int64) - else: - gt_occ = np.load(occ_filename) - for i in range(gt_occ.shape[0]): - cls_id = self.occ_label_mapping[gt_occ[i][3]] - if cls_id < 0: - cls_id = 255 - gt_occ[i][3] = cls_id + gt_occ = np.load(occ_filename) + for i in range(gt_occ.shape[0]): + cls_id = self.occ_label_mapping[gt_occ[i][3]] + if cls_id < 0: + cls_id = 255 + gt_occ[i][3] = cls_id ann_info['gt_occupancy'] = gt_occ - if mask_filename is None: - ann_info['visible_occupancy_masks'] = [ - [] for i in range(len(info['images'])) - ] - else: - ann_info['visible_occupancy_masks'] = [] - occ_masks = mmengine.load(mask_filename) - for i in range(len(info['images'])): - ann_info['visible_occupancy_masks'].append( - occ_masks[i]['visible_occupancy']) + ann_info['visible_occupancy_masks'] = [] + occ_masks = mmengine.load(mask_filename) + for i in range(len(info['images'])): + ann_info['visible_occupancy_masks'].append( + occ_masks[i]['visible_occupancy']) ann_info['gt_bboxes_3d'] = self.box_type_3d( ann_info['gt_bboxes_3d'], diff --git a/models/EmbodiedScan/embodiedscan/datasets/mmscan_dataset.py b/models/EmbodiedScan/embodiedscan/datasets/mmscan_dataset.py new file mode 100644 index 0000000..eee72a2 --- /dev/null +++ b/models/EmbodiedScan/embodiedscan/datasets/mmscan_dataset.py @@ -0,0 +1,542 @@ +# Copyright (c) OpenRobotLab. All rights reserved. +import os +import warnings +from os import path as osp +from typing import Callable, List, Optional, Union + +import mmengine +import numpy as np +from embodiedscan.registry import DATASETS +from embodiedscan.structures import get_box_type +from embodiedscan.structures.points import DepthPoints, get_points_type +from lry_utils.utils_read import to_sample_idx +from mmengine.dataset import BaseDataset +from mmengine.fileio import load + +import mmscan +from mmscan import MMScan + + +@DATASETS.register_module() +class MMScanPointCloud3DGroundingDataset(BaseDataset): + r"""Multi-View 3D Grounding Dataset for EmbodiedScan. + + This class serves as the API for experiments on the EmbodiedScan Dataset. + + Please refer to `EmbodiedScan Dataset + `_ for data downloading. + + TODO: Merge the implementation with EmbodiedScanDataset. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + vg_file (str): Path of the visual grounding annotation file. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + box_type_3d (str): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'Euler-Depth' in this dataset. + serialize_data (bool): Whether to serialize all data samples to save + memory. Defaults to False. It is set to True typically, but we + need to do the serialization after getting the data_list through + the preliminary loading and converting. 
Therefore, we set it to + False by default and serialize data samples at last meanwhile + setting this attribute to True. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + remove_dontcare (bool): Whether to remove objects that we do not care. + Defaults to False. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + load_eval_anns (bool): Whether to load evaluation annotations. + Defaults to True. Only take effect when test_mode is True. + """ + # NOTE: category "step" -> "steps" to avoid potential naming conflicts in + # TensorboardVisBackend + METAINFO = { + 'classes': + ('adhesive tape', 'air conditioner', 'alarm', 'album', 'arch', + 'backpack', 'bag', 'balcony', 'ball', 'banister', 'bar', 'barricade', + 'baseboard', 'basin', 'basket', 'bathtub', 'beam', 'beanbag', 'bed', + 'bench', 'bicycle', 'bidet', 'bin', 'blackboard', 'blanket', 'blinds', + 'board', 'body loofah', 'book', 'boots', 'bottle', 'bowl', 'box', + 'bread', 'broom', 'brush', 'bucket', 'cabinet', 'calendar', 'camera', + 'can', 'candle', 'candlestick', 'cap', 'car', 'carpet', 'cart', + 'case', 'ceiling', 'chair', 'chandelier', 'cleanser', 'clock', + 'clothes', 'clothes dryer', 'coat hanger', 'coffee maker', 'coil', + 'column', 'commode', 'computer', 'conducting wire', 'container', + 'control', 'copier', 'cosmetics', 'couch', 'counter', 'countertop', + 'crate', 'crib', 'cube', 'cup', 'curtain', 'cushion', 'decoration', + 'desk', 'detergent', 'device', 'dish rack', 'dishwasher', 'dispenser', + 'divider', 'door', 'door knob', 'doorframe', 'doorway', 'drawer', + 'dress', 'dresser', 'drum', 'duct', 'dumbbell', 'dustpan', 'dvd', + 'eraser', 'excercise equipment', 'fan', 'faucet', 'fence', 'file', + 'fire extinguisher', 'fireplace', 'floor', 'flowerpot', 'flush', + 'folder', 'food', 'footstool', 'frame', 'fruit', 'furniture', + 'garage door', 'garbage', 'glass', 'globe', 'glove', 'grab bar', + 'grass', 'guitar', 'hair dryer', 'hamper', 'handle', 'hanger', 'hat', + 'headboard', 'headphones', 'heater', 'helmets', 'holder', 'hook', + 'humidifier', 'ironware', 'jacket', 'jalousie', 'jar', 'kettle', + 'keyboard', 'kitchen island', 'kitchenware', 'knife', 'label', + 'ladder', 'lamp', 'laptop', 'ledge', 'letter', 'light', 'luggage', + 'machine', 'magazine', 'mailbox', 'map', 'mask', 'mat', 'mattress', + 'menu', 'microwave', 'mirror', 'molding', 'monitor', 'mop', 'mouse', + 'napkins', 'notebook', 'object', 'ottoman', 'oven', 'pack', 'package', + 'pad', 'pan', 'panel', 'paper', 'paper cutter', 'partition', + 'pedestal', 'pen', 'person', 'piano', 'picture', 'pillar', 'pillow', + 'pipe', 'pitcher', 'plant', 'plate', 'player', 'plug', 'plunger', + 'pool', 'pool table', 'poster', 'pot', 'price tag', 'printer', + 'projector', 'purse', 'rack', 'radiator', 'radio', 'rail', + 'range hood', 'refrigerator', 'remote control', 'ridge', 'rod', + 'roll', 'roof', 'rope', 'sack', 'salt', 'scale', 'scissors', 'screen', + 'seasoning', 'shampoo', 'sheet', 'shelf', 'shirt', 'shoe', 'shovel', + 'shower', 'sign', 'sink', 'soap', 'soap dish', 'soap dispenser', + 'socket', 'speaker', 'sponge', 'spoon', 'stairs', 'stall', 'stand', + 'stapler', 'statue', 'steps', 'stick', 'stool', 'stopcock', 'stove', + 'structure', 'sunglasses', 'support', 'switch', 'table', 'tablet', + 'teapot', 'telephone', 'thermostat', 'tissue', 'tissue box', + 'toaster', 
'toilet', 'toilet paper', 'toiletry', 'tool', 'toothbrush', + 'toothpaste', 'towel', 'toy', 'tray', 'treadmill', 'trophy', 'tube', + 'tv', 'umbrella', 'urn', 'utensil', 'vacuum cleaner', 'vanity', + 'vase', 'vent', 'ventilation', 'wall', 'wardrobe', 'washbasin', + 'washing machine', 'water cooler', 'water heater', 'window', + 'window frame', 'windowsill', 'wine', 'wire', 'wood', 'wrap'), + 'valid_class_ids': + (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288) + } + + # anno_file is not needed + def __init__(self, + data_root: str, + ann_file: str, + vg_file: str, + metainfo: Optional[dict] = None, + pipeline: List[Union[dict, Callable]] = [], + box_type_3d: str = 'Euler-Depth', + serialize_data: bool = False, + filter_empty_gt: bool = True, + remove_dontcare: bool = False, + test_mode: bool = False, + load_eval_anns: bool = True, + tokens_positive_rebuild: bool = False, + **kwargs) -> None: + + self.mmscan_loader = MMScan(version='v1', + split='val' if test_mode else 'train', + task='MMScan-VG', + ratio=0.2) + + if 'classes' in metainfo: + if metainfo['classes'] == 'all': + metainfo['classes'] = list(self.METAINFO['classes']) + + self.det3d_valid_id2label = np.zeros( + max(self.METAINFO['valid_class_ids']) + 1, dtype=np.int64) + for _ in range(self.det3d_valid_id2label.shape[0]): + self.det3d_valid_id2label[_] = -1 + for cls_idx, cat_id in enumerate(self.METAINFO['valid_class_ids']): + self.det3d_valid_id2label[cat_id] = cls_idx + + self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d) + self.filter_empty_gt = filter_empty_gt + self.remove_dontcare = remove_dontcare + self.load_eval_anns = load_eval_anns + self.tokens_positive_rebuild = tokens_positive_rebuild + + super().__init__(data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + pipeline=pipeline, + serialize_data=serialize_data, + test_mode=test_mode, + **kwargs) + + self.vg_file = osp.join(self.data_root, vg_file) + self.convert_info_to_scan() + self.data_list = self.load_language_data() + print(f'successfully loaded {len(self.data_list)} samples') + self.data_bytes, self.data_address = self._serialize_data() + self.serialize_data = True + + def process_metainfo(self): + """This function will be processed after 
metainfos from ann_file and + config are combined.""" + + print('PROCESSING METAINFO!!!!!!!!!!!!!!!') + assert 'categories' in self._metainfo + + if 'classes' not in self._metainfo: + self._metainfo.setdefault( + 'classes', list(self._metainfo['categories'].keys())) + + self.label_mapping = np.full( + max(list(self._metainfo['categories'].values())) + 1, + -1, + dtype=int) + + # val to match the index of the key + for key, value in self._metainfo['categories'].items(): + if key in self._metainfo['classes']: + self.label_mapping[value] = self._metainfo['classes'].index( + key) + + self.occ_label_mapping = np.full( + max(list(self._metainfo['categories'].values())) + 1, + -1, + dtype=int) + if 'occ_classes' in self._metainfo: + for idx, label_name in enumerate(self._metainfo['occ_classes']): + self.occ_label_mapping[self.metainfo['categories'][ + label_name]] = idx + 1 # 1-based, 0 is empty + + @staticmethod + def _get_axis_align_matrix(info: dict) -> np.ndarray: + """Get axis_align_matrix from info. If not exist, return identity mat. + + Args: + info (dict): Info of a single sample data. + + Returns: + np.ndarray: 4x4 transformation matrix. + """ + if 'axis_align_matrix' in info: + return np.array(info['axis_align_matrix']) + else: + warnings.warn( + 'axis_align_matrix is not found in ScanNet data info, please ' + 'use new pre-process scripts to re-generate ScanNet data') + return np.eye(4).astype(np.float32) + + # need to compensate the scan_id info to the original pkl file + def convert_info_to_scan(self): + + self.scans = dict() + + for data in self.data_list: + scan_id = data['sample_idx'] + data.update({'scan_id': scan_id}) + self.scans[scan_id] = data + print('number of true scans:', len(list(self.scans.keys()))) + + @staticmethod + def _is_view_dep(text): + """Check whether to augment based on sr3d utterance.""" + rels = [ + 'front', 'behind', 'back', 'left', 'right', 'facing', 'leftmost', + 'rightmost', 'looking', 'across' + ] + words = set(text.split()) + return any(rel in words for rel in rels) + + def load_data_list(self) -> List[dict]: + """Load annotations from an annotation file named as ``self.ann_file`` + + If the annotation file does not follow `OpenMMLab 2.0 format dataset + `_ . + The subclass must override this method for load annotations. The meta + information of annotation file will be overwritten :attr:`METAINFO` + and ``metainfo`` argument of constructor. + + Returns: + list[dict]: A list of annotation. + """ # noqa: E501 + # `self.ann_file` denotes the absolute annotation file path if + # `self.root=None` or relative path if `self.root=/path/to/data/`. + + annotations = load(self.mmscan_loader.get_possess('es_file', '')) + if not isinstance(annotations, dict): + raise TypeError(f'The annotations loaded from annotation file ' + f'should be a dict, but got {type(annotations)}!') + if 'data_list' not in annotations or 'metainfo' not in annotations: + raise ValueError('Annotation must have data_list and metainfo ' + 'keys') + metainfo = annotations['metainfo'] + raw_data_list = annotations['data_list'] + + # Meta information load from annotation file will not influence the + # existed meta information load from `BaseDataset.METAINFO` and + # `metainfo` arguments defined in constructor. + for k, v in metainfo.items(): + self._metainfo.setdefault(k, v) + + self.process_metainfo() + + # load and parse data_infos. 
+ data_list = [] + for raw_data_info in raw_data_list: + # parse raw data information to target format + data_info = self.parse_data_info(raw_data_info) + if isinstance(data_info, dict): + # For image tasks, `data_info` should information if single + # image, such as dict(img_path='xxx', width=360, ...) + data_list.append(data_info) + elif isinstance(data_info, list): + # For video tasks, `data_info` could contain image + # information of multiple frames, such as + # [dict(video_path='xxx', timestamps=...), + # dict(video_path='xxx', timestamps=...)] + for item in data_info: + if not isinstance(item, dict): + raise TypeError('data_info must be list of dict, but ' + f'got {type(item)}') + data_list.extend(data_info) + else: + raise TypeError('data_info should be a dict or list of dict, ' + f'but got {type(data_info)}') + + return data_list + + def load_language_data(self): + # load the object-level annotations + language_annotations = self.mmscan_loader.samples + + language_infos = [] + print('number of scans loaded from info data:', + len(list(self.scans.keys()))) + num_dropped_by_not_scan_id = 0 + num_dropped_by_multiple = 0 + for anno in mmengine.track_iter_progress(language_annotations): + language_info = dict() + anno['scan_id'] = to_sample_idx(anno['scan_id']) + language_info.update({ + 'scan_id': anno['scan_id'], + 'text': anno['text'] + }) + if not language_info['scan_id'] in self.scans: + num_dropped_by_not_scan_id += 1 + continue #HACK: only happens at debugging mini data + data = self.scans[language_info['scan_id']] + language_info['scan_id'] = data['scan_id'] + + ann_info = data['ann_info'] + + # save the bounding boxes and corresponding labels + language_anno_info = dict() + language_anno_info['is_view_dep'] = self._is_view_dep( + language_info['text']) + labels = ann_info['gt_labels_3d'] # all box labels in the scan + bboxes = ann_info['gt_bboxes_3d'] # BaseInstanceBboxes + if 'target_id' in anno: # w/ ground truths + language_info.update({'target_id': anno['target_id']}) + # obtain all objects sharing the same category with + # the target object, the num of such objects <= 32 + object_ids = ann_info['bbox_id'] # numpy array + if isinstance(anno['target_id'], int): + object_ind = np.where( + object_ids == language_info['target_id'])[0] + if len(object_ind) != 1: + num_dropped_by_multiple += 1 + continue + language_anno_info['gt_bboxes_3d'] = bboxes[object_ind] + language_anno_info['gt_labels_3d'] = labels[object_ind] + if 'tokens_positive' in anno: + # to be removed after the info being updated + if self.tokens_positive_rebuild: + anno['tokens_positive'] = [[ + anno['text'].find(part), + anno['text'].find(part) + len(part) + ] for part in anno['target'].split()] + language_info['tokens_positive'] = [ + anno['tokens_positive'] + ] + elif isinstance(anno['target_id'], List): + object_indices = [] + keep_indices = [] + is_mapping_unique = True + for idx, target_id in enumerate( + language_info['target_id']): + assert isinstance(target_id, int) + object_ind = np.where(object_ids == target_id)[0] + if len(object_ind) != 1: + is_mapping_unique = False + break + keep_indices.append(idx) + object_indices.append(object_ind[0]) + + if not is_mapping_unique: + num_dropped_by_multiple += 1 + continue + else: + language_anno_info['gt_bboxes_3d'] = bboxes[ + object_indices] + language_anno_info['gt_labels_3d'] = labels[ + object_indices] + if 'tokens_positive' in anno: + language_info['tokens_positive'] = [[ + anno['tokens_positive'][idx] + ] for idx in keep_indices] + else: + raise 
NotImplementedError + # include other optional keys + optional_keys = ['distractor_ids'] + for key in optional_keys: + if key in anno: + language_info.update({key: anno[key]}) + # the 'distractor_ids' starts from 1, not 0 + language_anno_info['is_hard'] = len( + language_info['distractor_ids'] + ) > 3 # more than three distractors + language_anno_info['is_unique'] = len( + language_info['distractor_ids']) == 0 + + sub_class = anno.get('sub_class', '').lower() + if sub_class: + space = 'space' in sub_class or 'or' in sub_class + attribute = 'attribute' in sub_class or 'eq' in sub_class + assert space + attribute == 1, f'Invalid sub_class {sub_class}, should be space or attribute' + indirect = 'indirect' in sub_class + direct = not indirect + # assert "direct" in sub_class, f"Invalid sub_class {sub_class}, should contain the word direct" + language_anno_info['space'] = space + language_anno_info['direct'] = direct + language_anno_info['sub_class'] = sub_class + else: + language_anno_info['space'] = False + language_anno_info['direct'] = False + else: + # inference w/o gt, assign the placeholder gt_boxes and labels + language_anno_info['gt_bboxes_3d'] = bboxes + language_anno_info['gt_labels_3d'] = labels + # placeholder value for 'is_hard' and 'is_unique' + language_anno_info['is_hard'] = False + language_anno_info['is_unique'] = False + language_anno_info['space'] = False + language_anno_info['direct'] = False + + if not self.test_mode: + language_info['ann_info'] = language_anno_info + + if self.test_mode and self.load_eval_anns: + language_info['ann_info'] = language_anno_info + language_info['eval_ann_info'] = language_info['ann_info'] + # adding pcd info + + language_info['pc_file'] = self.mmscan_loader.get_possess( + 'pc_file', language_info['scan_id']) + + language_infos.append(language_info) + + del self.scans + print('dropped by false scan id', num_dropped_by_not_scan_id) + print('dropped by multiple indices', num_dropped_by_multiple) + return language_infos + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + The only difference with it in `Det3DDataset` + is the specific process for `axis_align_matrix'. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. + """ + info['box_type_3d'] = self.box_type_3d + info['axis_align_matrix'] = self._get_axis_align_matrix(info) + + if not self.test_mode: + info['ann_info'] = self.parse_ann_info(info) + if self.test_mode and self.load_eval_anns: + info['ann_info'] = self.parse_ann_info(info) + info['eval_ann_info'] = info['ann_info'] + return info + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Info dict. + + Returns: + dict: Processed `ann_info`. 
+ """ + + for instance in info['instances']: + if instance['bbox_label_3d'] < self.det3d_valid_id2label.shape[0]: + value = self.det3d_valid_id2label[instance['bbox_label_3d']] + if value < 0: + raise Exception('Class out of range') + instance['bbox_label_3d'] = value + else: + raise Exception('Class out of range') + + # change some now from the base pkl + name_mapping = { + 'bbox_label_3d': 'gt_labels_3d', + 'bbox_label': 'gt_bboxes_labels', + 'bbox': 'gt_bboxes', + 'bbox_3d': 'gt_bboxes_3d', + 'depth': 'depths', + 'center_2d': 'centers_2d', + 'attr_label': 'attr_labels', + 'velocity': 'velocities', + } + instances = info['instances'] + # empty gt + if len(instances) == 0: + return None + else: + keys = list(instances[0].keys()) + ann_info = dict() + for ann_name in keys: + temp_anns = [item[ann_name] for item in instances] + # map the original dataset label to training label + if 'label' in ann_name and ann_name != 'attr_label': + temp_anns = [ + self.label_mapping[item] for item in temp_anns + ] + if ann_name in name_mapping: + mapped_ann_name = name_mapping[ann_name] + else: + mapped_ann_name = ann_name + + if 'label' in ann_name: + temp_anns = np.array(temp_anns).astype(np.int64) + elif ann_name in name_mapping: + temp_anns = np.array(temp_anns).astype(np.float32) + else: + temp_anns = np.array(temp_anns) + + ann_info[mapped_ann_name] = temp_anns + ann_info['instances'] = info['instances'] + + if ann_info is None: + ann_info = dict() + ann_info['gt_bboxes_3d'] = np.zeros((0, 9), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros((0, ), dtype=np.int64) + + ann_info['gt_bboxes_3d'] = self.box_type_3d( + ann_info['gt_bboxes_3d'], + box_dim=ann_info['gt_bboxes_3d'].shape[-1], + with_yaw=True, + origin=(0.5, 0.5, 0.5)) + + return ann_info diff --git a/embodiedscan/datasets/mv_3dvg_dataset.py b/models/EmbodiedScan/embodiedscan/datasets/mv_3dvg_dataset.py similarity index 90% rename from embodiedscan/datasets/mv_3dvg_dataset.py rename to models/EmbodiedScan/embodiedscan/datasets/mv_3dvg_dataset.py index 4e3a5a2..6d4e802 100644 --- a/embodiedscan/datasets/mv_3dvg_dataset.py +++ b/models/EmbodiedScan/embodiedscan/datasets/mv_3dvg_dataset.py @@ -6,11 +6,11 @@ import mmengine import numpy as np -from mmengine.dataset import BaseDataset -from mmengine.fileio import load - from embodiedscan.registry import DATASETS from embodiedscan.structures import get_box_type +from lry_utils.utils_read import to_sample_idx +from mmengine.dataset import BaseDataset +from mmengine.fileio import load @DATASETS.register_module() @@ -145,6 +145,13 @@ def __init__(self, if metainfo['classes'] == 'all': metainfo['classes'] = list(self.METAINFO['classes']) + self.det3d_valid_id2label = np.zeros( + max(self.METAINFO['valid_class_ids']) + 1, dtype=np.int64) + for _ in range(self.det3d_valid_id2label.shape[0]): + self.det3d_valid_id2label[_] = -1 + for cls_idx, cat_id in enumerate(self.METAINFO['valid_class_ids']): + self.det3d_valid_id2label[cat_id] = cls_idx + self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d) self.filter_empty_gt = filter_empty_gt self.remove_dontcare = remove_dontcare @@ -162,6 +169,7 @@ def __init__(self, self.vg_file = osp.join(self.data_root, vg_file) self.convert_info_to_scan() self.data_list = self.load_language_data() + print(f'successfully loaded {len(self.data_list)} samples') self.data_bytes, self.data_address = self._serialize_data() self.serialize_data = True @@ -300,12 +308,20 @@ def load_language_data(self): # According to each object annotation, # find all objects in 
the corresponding scan language_infos = [] + print('number of scans loaded from info data:', + len(list(self.scans.keys()))) + num_dropped_by_not_scan_id = 0 + num_dropped_by_multiple = 0 for anno in mmengine.track_iter_progress(language_annotations): language_info = dict() + anno['scan_id'] = to_sample_idx(anno['scan_id']) language_info.update({ 'scan_id': anno['scan_id'], 'text': anno['text'] }) + if language_info['scan_id'] not in self.scans: + num_dropped_by_not_scan_id += 1 + continue data = self.scans[language_info['scan_id']] language_info['axis_align_matrix'] = data['axis_align_matrix'] language_info['img_path'] = data['img_path'] @@ -334,6 +350,7 @@ def load_language_data(self): object_ind = np.where( object_ids == language_info['target_id'])[0] if len(object_ind) != 1: + num_dropped_by_multiple += 1 continue language_anno_info['gt_bboxes_3d'] = bboxes[object_ind] language_anno_info['gt_labels_3d'] = labels[object_ind] @@ -353,6 +370,7 @@ def load_language_data(self): is_mapping_unique = True for idx, target_id in enumerate( language_info['target_id']): + assert isinstance(target_id, int) object_ind = np.where(object_ids == target_id)[0] if len(object_ind) != 1: is_mapping_unique = False @@ -360,6 +378,7 @@ def load_language_data(self): keep_indices.append(idx) object_indices.append(object_ind[0]) if not is_mapping_unique: + num_dropped_by_multiple += 1 continue else: language_anno_info['gt_bboxes_3d'] = bboxes[ @@ -383,6 +402,20 @@ def load_language_data(self): ) > 3 # more than three distractors language_anno_info['is_unique'] = len( language_info['distractor_ids']) == 0 + sub_class = anno.get('sub_class', '').lower() + if sub_class: + space = 'space' in sub_class or 'or' in sub_class + attribute = 'attribute' in sub_class or 'eq' in sub_class + assert space + attribute == 1, f'Invalid sub_class {sub_class}, should be space or attribute' + indirect = 'indirect' in sub_class + direct = not indirect + # assert "direct" in sub_class, f"Invalid sub_class {sub_class}, should contain the word direct" + language_anno_info['space'] = space + language_anno_info['direct'] = direct + language_anno_info['sub_class'] = sub_class + else: + language_anno_info['space'] = False + language_anno_info['direct'] = False else: # inference w/o gt, assign the placeholder gt_boxes and labels language_anno_info['gt_bboxes_3d'] = bboxes @@ -390,6 +423,8 @@ def load_language_data(self): # placeholder value for 'is_hard' and 'is_unique' language_anno_info['is_hard'] = False language_anno_info['is_unique'] = False + language_anno_info['space'] = False + language_anno_info['direct'] = False if not self.test_mode: language_info['ann_info'] = language_anno_info @@ -401,7 +436,8 @@ def load_language_data(self): language_infos.append(language_info) del self.scans - + print('dropped by false scan id', num_dropped_by_not_scan_id) + print('dropped by multiple indices', num_dropped_by_multiple) return language_infos def parse_data_info(self, info: dict) -> dict: @@ -474,6 +510,15 @@ def parse_ann_info(self, info: dict) -> dict: Returns: dict: Processed `ann_info`. 
""" + for instance in info['instances']: + if instance['bbox_label_3d'] < self.det3d_valid_id2label.shape[0]: + value = self.det3d_valid_id2label[instance['bbox_label_3d']] + if value < 0: + raise Exception('Class out of range') + instance['bbox_label_3d'] = value + else: + raise Exception('Class out of range') + ann_info = None if 'instances' in info and len(info['instances']) > 0: diff --git a/models/EmbodiedScan/embodiedscan/datasets/pcd_3dvg_dataset.py b/models/EmbodiedScan/embodiedscan/datasets/pcd_3dvg_dataset.py new file mode 100644 index 0000000..5c33aec --- /dev/null +++ b/models/EmbodiedScan/embodiedscan/datasets/pcd_3dvg_dataset.py @@ -0,0 +1,538 @@ +# Copyright (c) OpenRobotLab. All rights reserved. +import os +import warnings +from os import path as osp +from typing import Callable, List, Optional, Union + +import mmengine +import numpy as np +from embodiedscan.registry import DATASETS +from embodiedscan.structures import get_box_type +from lry_utils.utils_read import to_sample_idx +from mmengine.dataset import BaseDataset +from mmengine.fileio import load + + +@DATASETS.register_module() +class PointCloud3DGroundingDataset(BaseDataset): + r"""Multi-View 3D Grounding Dataset for EmbodiedScan. + + This class serves as the API for experiments on the EmbodiedScan Dataset. + + Please refer to `EmbodiedScan Dataset + `_ for data downloading. + + TODO: Merge the implementation with EmbodiedScanDataset. + + Args: + data_root (str): Path of dataset root. + ann_file (str): Path of annotation file. + vg_file (str): Path of the visual grounding annotation file. + metainfo (dict, optional): Meta information for dataset, such as class + information. Defaults to None. + pipeline (List[dict]): Pipeline used for data processing. + Defaults to []. + box_type_3d (str): Type of 3D box of this dataset. + Based on the `box_type_3d`, the dataset will encapsulate the box + to its original format then converted them to `box_type_3d`. + Defaults to 'Euler-Depth' in this dataset. + serialize_data (bool): Whether to serialize all data samples to save + memory. Defaults to False. It is set to True typically, but we + need to do the serialization after getting the data_list through + the preliminary loading and converting. Therefore, we set it to + False by default and serialize data samples at last meanwhile + setting this attribute to True. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + remove_dontcare (bool): Whether to remove objects that we do not care. + Defaults to False. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + load_eval_anns (bool): Whether to load evaluation annotations. + Defaults to True. Only take effect when test_mode is True. 
+ """ + # NOTE: category "step" -> "steps" to avoid potential naming conflicts in + # TensorboardVisBackend + METAINFO = { + 'classes': + ('adhesive tape', 'air conditioner', 'alarm', 'album', 'arch', + 'backpack', 'bag', 'balcony', 'ball', 'banister', 'bar', 'barricade', + 'baseboard', 'basin', 'basket', 'bathtub', 'beam', 'beanbag', 'bed', + 'bench', 'bicycle', 'bidet', 'bin', 'blackboard', 'blanket', 'blinds', + 'board', 'body loofah', 'book', 'boots', 'bottle', 'bowl', 'box', + 'bread', 'broom', 'brush', 'bucket', 'cabinet', 'calendar', 'camera', + 'can', 'candle', 'candlestick', 'cap', 'car', 'carpet', 'cart', + 'case', 'ceiling', 'chair', 'chandelier', 'cleanser', 'clock', + 'clothes', 'clothes dryer', 'coat hanger', 'coffee maker', 'coil', + 'column', 'commode', 'computer', 'conducting wire', 'container', + 'control', 'copier', 'cosmetics', 'couch', 'counter', 'countertop', + 'crate', 'crib', 'cube', 'cup', 'curtain', 'cushion', 'decoration', + 'desk', 'detergent', 'device', 'dish rack', 'dishwasher', 'dispenser', + 'divider', 'door', 'door knob', 'doorframe', 'doorway', 'drawer', + 'dress', 'dresser', 'drum', 'duct', 'dumbbell', 'dustpan', 'dvd', + 'eraser', 'excercise equipment', 'fan', 'faucet', 'fence', 'file', + 'fire extinguisher', 'fireplace', 'floor', 'flowerpot', 'flush', + 'folder', 'food', 'footstool', 'frame', 'fruit', 'furniture', + 'garage door', 'garbage', 'glass', 'globe', 'glove', 'grab bar', + 'grass', 'guitar', 'hair dryer', 'hamper', 'handle', 'hanger', 'hat', + 'headboard', 'headphones', 'heater', 'helmets', 'holder', 'hook', + 'humidifier', 'ironware', 'jacket', 'jalousie', 'jar', 'kettle', + 'keyboard', 'kitchen island', 'kitchenware', 'knife', 'label', + 'ladder', 'lamp', 'laptop', 'ledge', 'letter', 'light', 'luggage', + 'machine', 'magazine', 'mailbox', 'map', 'mask', 'mat', 'mattress', + 'menu', 'microwave', 'mirror', 'molding', 'monitor', 'mop', 'mouse', + 'napkins', 'notebook', 'object', 'ottoman', 'oven', 'pack', 'package', + 'pad', 'pan', 'panel', 'paper', 'paper cutter', 'partition', + 'pedestal', 'pen', 'person', 'piano', 'picture', 'pillar', 'pillow', + 'pipe', 'pitcher', 'plant', 'plate', 'player', 'plug', 'plunger', + 'pool', 'pool table', 'poster', 'pot', 'price tag', 'printer', + 'projector', 'purse', 'rack', 'radiator', 'radio', 'rail', + 'range hood', 'refrigerator', 'remote control', 'ridge', 'rod', + 'roll', 'roof', 'rope', 'sack', 'salt', 'scale', 'scissors', 'screen', + 'seasoning', 'shampoo', 'sheet', 'shelf', 'shirt', 'shoe', 'shovel', + 'shower', 'sign', 'sink', 'soap', 'soap dish', 'soap dispenser', + 'socket', 'speaker', 'sponge', 'spoon', 'stairs', 'stall', 'stand', + 'stapler', 'statue', 'steps', 'stick', 'stool', 'stopcock', 'stove', + 'structure', 'sunglasses', 'support', 'switch', 'table', 'tablet', + 'teapot', 'telephone', 'thermostat', 'tissue', 'tissue box', + 'toaster', 'toilet', 'toilet paper', 'toiletry', 'tool', 'toothbrush', + 'toothpaste', 'towel', 'toy', 'tray', 'treadmill', 'trophy', 'tube', + 'tv', 'umbrella', 'urn', 'utensil', 'vacuum cleaner', 'vanity', + 'vase', 'vent', 'ventilation', 'wall', 'wardrobe', 'washbasin', + 'washing machine', 'water cooler', 'water heater', 'window', + 'window frame', 'windowsill', 'wine', 'wire', 'wood', 'wrap'), + 'valid_class_ids': + (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 
65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288) + } + + def __init__(self, + data_root: str, + ann_file: str, + vg_file: str, + metainfo: Optional[dict] = None, + pipeline: List[Union[dict, Callable]] = [], + box_type_3d: str = 'Euler-Depth', + serialize_data: bool = False, + filter_empty_gt: bool = True, + remove_dontcare: bool = False, + test_mode: bool = False, + load_eval_anns: bool = True, + tokens_positive_rebuild: bool = False, + **kwargs) -> None: + + if 'classes' in metainfo: + if metainfo['classes'] == 'all': + metainfo['classes'] = list(self.METAINFO['classes']) + + self.det3d_valid_id2label = np.zeros( + max(self.METAINFO['valid_class_ids']) + 1, dtype=np.int64) + for _ in range(self.det3d_valid_id2label.shape[0]): + self.det3d_valid_id2label[_] = -1 + for cls_idx, cat_id in enumerate(self.METAINFO['valid_class_ids']): + self.det3d_valid_id2label[cat_id] = cls_idx + + self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d) + self.filter_empty_gt = filter_empty_gt + self.remove_dontcare = remove_dontcare + self.load_eval_anns = load_eval_anns + self.tokens_positive_rebuild = tokens_positive_rebuild + + super().__init__(data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + pipeline=pipeline, + serialize_data=serialize_data, + test_mode=test_mode, + **kwargs) + + self.vg_file = osp.join(self.data_root, vg_file) + self.convert_info_to_scan() + self.data_list = self.load_language_data() + print(f'successfully loaded {len(self.data_list)} samples') + self.data_bytes, self.data_address = self._serialize_data() + self.serialize_data = True + + def process_metainfo(self): + """This function will be processed after metainfos from ann_file and + config are combined.""" + + print('PROCESSING METAINFO!!!!!!!!!!!!!!!') + assert 'categories' in self._metainfo + + if 'classes' not in self._metainfo: + self._metainfo.setdefault( + 'classes', list(self._metainfo['categories'].keys())) + + self.label_mapping = np.full( + max(list(self._metainfo['categories'].values())) + 1, + -1, + dtype=int) + + # val to match the index of the key + for key, value in self._metainfo['categories'].items(): + if key in self._metainfo['classes']: + self.label_mapping[value] = self._metainfo['classes'].index( + key) + + self.occ_label_mapping = np.full( + max(list(self._metainfo['categories'].values())) + 1, + -1, + dtype=int) + if 'occ_classes' in self._metainfo: + for idx, label_name in enumerate(self._metainfo['occ_classes']): + 
self.occ_label_mapping[self.metainfo['categories'][ + label_name]] = idx + 1 # 1-based, 0 is empty + + @staticmethod + def _get_axis_align_matrix(info: dict) -> np.ndarray: + """Get axis_align_matrix from info. If not exist, return identity mat. + + Args: + info (dict): Info of a single sample data. + + Returns: + np.ndarray: 4x4 transformation matrix. + """ + if 'axis_align_matrix' in info: + return np.array(info['axis_align_matrix']) + else: + warnings.warn( + 'axis_align_matrix is not found in ScanNet data info, please ' + 'use new pre-process scripts to re-generate ScanNet data') + return np.eye(4).astype(np.float32) + + # need to compensate the scan_id info to the original pkl file + def convert_info_to_scan(self): + print('CONVERT INFOS!!!!!!!!') + self.scans = dict() + + for data in self.data_list: + scan_id = data['sample_idx'] + data.update({'scan_id': scan_id}) + self.scans[scan_id] = data + print('number of true scans:', len(list(self.scans.keys()))) + + @staticmethod + def _is_view_dep(text): + """Check whether to augment based on sr3d utterance.""" + rels = [ + 'front', 'behind', 'back', 'left', 'right', 'facing', 'leftmost', + 'rightmost', 'looking', 'across' + ] + words = set(text.split()) + return any(rel in words for rel in rels) + + def load_data_list(self) -> List[dict]: + """Load annotations from an annotation file named as ``self.ann_file`` + + If the annotation file does not follow `OpenMMLab 2.0 format dataset + `_ . + The subclass must override this method for load annotations. The meta + information of annotation file will be overwritten :attr:`METAINFO` + and ``metainfo`` argument of constructor. + + Returns: + list[dict]: A list of annotation. + """ # noqa: E501 + # `self.ann_file` denotes the absolute annotation file path if + # `self.root=None` or relative path if `self.root=/path/to/data/`. + + annotations = load(self.ann_file) + if not isinstance(annotations, dict): + raise TypeError(f'The annotations loaded from annotation file ' + f'should be a dict, but got {type(annotations)}!') + if 'data_list' not in annotations or 'metainfo' not in annotations: + raise ValueError('Annotation must have data_list and metainfo ' + 'keys') + metainfo = annotations['metainfo'] + raw_data_list = annotations['data_list'] + + # Meta information load from annotation file will not influence the + # existed meta information load from `BaseDataset.METAINFO` and + # `metainfo` arguments defined in constructor. + for k, v in metainfo.items(): + self._metainfo.setdefault(k, v) + + self.process_metainfo() + + # load and parse data_infos. + data_list = [] + for raw_data_info in raw_data_list: + # parse raw data information to target format + data_info = self.parse_data_info(raw_data_info) + if isinstance(data_info, dict): + # For image tasks, `data_info` should information if single + # image, such as dict(img_path='xxx', width=360, ...) 
+ data_list.append(data_info) + elif isinstance(data_info, list): + # For video tasks, `data_info` could contain image + # information of multiple frames, such as + # [dict(video_path='xxx', timestamps=...), + # dict(video_path='xxx', timestamps=...)] + for item in data_info: + if not isinstance(item, dict): + raise TypeError('data_info must be list of dict, but ' + f'got {type(item)}') + data_list.extend(data_info) + else: + raise TypeError('data_info should be a dict or list of dict, ' + f'but got {type(data_info)}') + + return data_list + + def load_language_data(self): + # load the object-level annotations + language_annotations = load(self.vg_file) + # language_infos = [ + # { + # 'scan_id': anno['scan_id'], + # 'text': anno['text'], + # 'target_id': int(anno['target_id']), (training) + # 'distractor_ids': anno['distractor_ids'], (training) + # 'tokens_positive': anno['tokens_positive'] (training) + # } + # for anno in language_annotations + # ] + # According to each object annotation, + # find all objects in the corresponding scan + language_infos = [] + print('number of scans loaded from info data:', + len(list(self.scans.keys()))) + num_dropped_by_not_scan_id = 0 + num_dropped_by_multiple = 0 + for anno in mmengine.track_iter_progress(language_annotations): + language_info = dict() + anno['scan_id'] = to_sample_idx(anno['scan_id']) + language_info.update({ + 'scan_id': anno['scan_id'], + 'text': anno['text'] + }) + if not language_info['scan_id'] in self.scans: + num_dropped_by_not_scan_id += 1 + continue #HACK: only happens at debugging mini data + data = self.scans[language_info['scan_id']] + language_info['scan_id'] = data['scan_id'] + + ann_info = data['ann_info'] + + # save the bounding boxes and corresponding labels + language_anno_info = dict() + language_anno_info['is_view_dep'] = self._is_view_dep( + language_info['text']) + labels = ann_info['gt_labels_3d'] # all box labels in the scan + bboxes = ann_info['gt_bboxes_3d'] # BaseInstanceBboxes + if 'target_id' in anno: # w/ ground truths + language_info.update({'target_id': anno['target_id']}) + # obtain all objects sharing the same category with + # the target object, the num of such objects <= 32 + object_ids = ann_info['bbox_id'] # numpy array + if isinstance(anno['target_id'], int): + object_ind = np.where( + object_ids == language_info['target_id'])[0] + if len(object_ind) != 1: + num_dropped_by_multiple += 1 + continue + language_anno_info['gt_bboxes_3d'] = bboxes[object_ind] + language_anno_info['gt_labels_3d'] = labels[object_ind] + if 'tokens_positive' in anno: + # to be removed after the info being updated + if self.tokens_positive_rebuild: + anno['tokens_positive'] = [[ + anno['text'].find(part), + anno['text'].find(part) + len(part) + ] for part in anno['target'].split()] + language_info['tokens_positive'] = [ + anno['tokens_positive'] + ] + elif isinstance(anno['target_id'], List): + object_indices = [] + keep_indices = [] + is_mapping_unique = True + for idx, target_id in enumerate( + language_info['target_id']): + assert isinstance(target_id, int) + object_ind = np.where(object_ids == target_id)[0] + if len(object_ind) != 1: + is_mapping_unique = False + break + keep_indices.append(idx) + object_indices.append(object_ind[0]) + + if not is_mapping_unique: + num_dropped_by_multiple += 1 + continue + else: + language_anno_info['gt_bboxes_3d'] = bboxes[ + object_indices] + language_anno_info['gt_labels_3d'] = labels[ + object_indices] + if 'tokens_positive' in anno: + language_info['tokens_positive'] = [[ + 
anno['tokens_positive'][idx] + ] for idx in keep_indices] + else: + raise NotImplementedError + # include other optional keys + optional_keys = ['distractor_ids'] + for key in optional_keys: + if key in anno: + language_info.update({key: anno[key]}) + # the 'distractor_ids' starts from 1, not 0 + language_anno_info['is_hard'] = len( + language_info['distractor_ids'] + ) > 3 # more than three distractors + language_anno_info['is_unique'] = len( + language_info['distractor_ids']) == 0 + + sub_class = anno.get('sub_class', '').lower() + if sub_class: + space = 'space' in sub_class or 'or' in sub_class + attribute = 'attribute' in sub_class or 'eq' in sub_class + assert space + attribute == 1, f'Invalid sub_class {sub_class}, should be space or attribute' + indirect = 'indirect' in sub_class + direct = not indirect + # assert "direct" in sub_class, f"Invalid sub_class {sub_class}, should contain the word direct" + language_anno_info['space'] = space + language_anno_info['direct'] = direct + language_anno_info['sub_class'] = sub_class + else: + language_anno_info['space'] = False + language_anno_info['direct'] = False + else: + # inference w/o gt, assign the placeholder gt_boxes and labels + language_anno_info['gt_bboxes_3d'] = bboxes + language_anno_info['gt_labels_3d'] = labels + # placeholder value for 'is_hard' and 'is_unique' + language_anno_info['is_hard'] = False + language_anno_info['is_unique'] = False + language_anno_info['space'] = False + language_anno_info['direct'] = False + + if not self.test_mode: + language_info['ann_info'] = language_anno_info + + if self.test_mode and self.load_eval_anns: + language_info['ann_info'] = language_anno_info + language_info['eval_ann_info'] = language_info['ann_info'] + + language_infos.append(language_info) + + del self.scans + print('dropped by false scan id', num_dropped_by_not_scan_id) + print('dropped by multiple indices', num_dropped_by_multiple) + return language_infos + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + The only difference with it in `Det3DDataset` + is the specific process for `axis_align_matrix'. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. + """ + info['box_type_3d'] = self.box_type_3d + info['axis_align_matrix'] = self._get_axis_align_matrix(info) + + if not self.test_mode: + info['ann_info'] = self.parse_ann_info(info) + if self.test_mode and self.load_eval_anns: + info['ann_info'] = self.parse_ann_info(info) + info['eval_ann_info'] = info['ann_info'] + return info + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Info dict. + + Returns: + dict: Processed `ann_info`. 
+ """ + for instance in info['instances']: + if instance['bbox_label_3d'] < self.det3d_valid_id2label.shape[0]: + value = self.det3d_valid_id2label[instance['bbox_label_3d']] + if value < 0: + raise Exception('Class out of range') + instance['bbox_label_3d'] = value + else: + raise Exception('Class out of range') + + # change some now from the base pkl + name_mapping = { + 'bbox_label_3d': 'gt_labels_3d', + 'bbox_label': 'gt_bboxes_labels', + 'bbox': 'gt_bboxes', + 'bbox_3d': 'gt_bboxes_3d', + 'depth': 'depths', + 'center_2d': 'centers_2d', + 'attr_label': 'attr_labels', + 'velocity': 'velocities', + } + instances = info['instances'] + # empty gt + if len(instances) == 0: + return None + else: + keys = list(instances[0].keys()) + ann_info = dict() + for ann_name in keys: + temp_anns = [item[ann_name] for item in instances] + # map the original dataset label to training label + if 'label' in ann_name and ann_name != 'attr_label': + temp_anns = [ + self.label_mapping[item] for item in temp_anns + ] + if ann_name in name_mapping: + mapped_ann_name = name_mapping[ann_name] + else: + mapped_ann_name = ann_name + + if 'label' in ann_name: + temp_anns = np.array(temp_anns).astype(np.int64) + elif ann_name in name_mapping: + temp_anns = np.array(temp_anns).astype(np.float32) + else: + temp_anns = np.array(temp_anns) + + ann_info[mapped_ann_name] = temp_anns + ann_info['instances'] = info['instances'] + + if ann_info is None: + ann_info = dict() + ann_info['gt_bboxes_3d'] = np.zeros((0, 9), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros((0, ), dtype=np.int64) + + ann_info['gt_bboxes_3d'] = self.box_type_3d( + ann_info['gt_bboxes_3d'], + box_dim=ann_info['gt_bboxes_3d'].shape[-1], + with_yaw=True, + origin=(0.5, 0.5, 0.5)) + + return ann_info diff --git a/models/EmbodiedScan/embodiedscan/datasets/pcd_3dvg_dataset_demo.py b/models/EmbodiedScan/embodiedscan/datasets/pcd_3dvg_dataset_demo.py new file mode 100644 index 0000000..f91a9d7 --- /dev/null +++ b/models/EmbodiedScan/embodiedscan/datasets/pcd_3dvg_dataset_demo.py @@ -0,0 +1,638 @@ +# Copyright (c) OpenRobotLab. All rights reserved. 
+import os
+import warnings
+from os import path as osp
+from typing import Callable, List, Optional, Union
+
+import mmengine
+import numpy as np
+from embodiedscan.registry import DATASETS
+from embodiedscan.structures import get_box_type
+from lry_utils.utils_read import to_sample_idx
+from mmengine.dataset import BaseDataset
+from mmengine.fileio import load
+from scipy.spatial.transform import Rotation as R
+
+
+def build_demo_info(demo_scene_id='office'):
+    """Build an EmbodiedScan-style info dict from raw demo captures."""
+    data_dir = os.path.join('data/open_scan', demo_scene_id)
+    with open(os.path.join(data_dir, 'poses.txt'), 'r') as f:
+        poses = f.readlines()
+    axis_align_matrix = np.loadtxt(
+        os.path.join(data_dir, 'axis_align_matrix.txt'))
+    intrinsic = np.loadtxt(os.path.join(data_dir, 'intrinsic.txt'))
+    intrinsic = intrinsic.astype(np.float32)
+    box_type = get_box_type('Euler-Depth')
+    info = dict(
+        axis_align_matrix=axis_align_matrix,
+        images=[],
+        img_path=[],
+        depth_img_path=[],
+        depth2img=dict(extrinsic=[],
+                       intrinsic=intrinsic,
+                       origin=np.array([.0, .0, .5]).astype(np.float32)),
+        depth_cam2img=intrinsic,
+        depth_shift=1000.0,
+        cam2img=intrinsic,
+        box_type_3d=box_type[0],
+        box_mode_3d=box_type[1],
+        ann_info=dict(  # empty annotation
+            gt_bboxes_3d=np.zeros((0, 9), dtype=np.float32),
+            gt_labels_3d=np.zeros((0, ), dtype=np.int64),
+            visible_instance_masks=[[] for i in range(len(poses))]))
+    n_frames = len(poses)
+    for i in range(1, n_frames):
+        timestamp, x, y, z, qx, qy, qz, qw = poses[i].split()
+        x, y, z, qx, qy, qz, qw = float(x), float(y), float(z), float(
+            qx), float(qy), float(qz), float(qw)
+        rot_matrix = R.from_quat([qx, qy, qz, qw]).as_matrix()
+        transform_matrix = np.identity(4)
+        transform_matrix[:3, :3] = rot_matrix @ [[0, 0, 1], [-1, 0, 0],
+                                                 [0, -1, 0]]
+        transform_matrix[:3, 3] = [x, y, z]  # CAM to NOT ALIGNED GLOBAL
+
+        image_ann = dict(img_path=os.path.join('demo', demo_scene_id, 'rgb',
+                                               timestamp + '.jpg'),
+                         depth_path=os.path.join('demo', demo_scene_id,
+                                                 'depth', timestamp + '.png'),
+                         cam2global=transform_matrix,
+                         cam2img=intrinsic)
+        info['images'].append(image_ann)
+        info['img_path'].append(
+            os.path.join(data_dir, 'rgb', timestamp + '.jpg'))
+        info['depth_img_path'].append(
+            os.path.join(data_dir, 'depth', timestamp + '.png'))
+        align_global2cam = np.linalg.inv(axis_align_matrix @ transform_matrix)
+        info['depth2img']['extrinsic'].append(
+            align_global2cam.astype(np.float32))
+    return info
+
+
+@DATASETS.register_module()
+class PointCloud3DGroundingDatasetDemo(BaseDataset):
+    r"""Multi-View 3D Grounding Dataset for EmbodiedScan.
+
+    This class serves as the API for experiments on the EmbodiedScan Dataset.
+
+    Please refer to `EmbodiedScan Dataset
+    `_ for data downloading.
+
+    TODO: Merge the implementation with EmbodiedScanDataset.
+
+    Args:
+        data_root (str): Path of dataset root.
+        ann_file (str): Path of annotation file.
+        vg_file (str): Path of the visual grounding annotation file.
+        metainfo (dict, optional): Meta information for dataset, such as class
+            information. Defaults to None.
+        pipeline (List[dict]): Pipeline used for data processing.
+            Defaults to [].
+        box_type_3d (str): Type of 3D box of this dataset.
+            Based on `box_type_3d`, the dataset encapsulates boxes in their
+            original format and then converts them to `box_type_3d`.
+            Defaults to 'Euler-Depth' in this dataset.
+        serialize_data (bool): Whether to serialize all data samples to save
+            memory. Defaults to False.
It is set to True typically, but we + need to do the serialization after getting the data_list through + the preliminary loading and converting. Therefore, we set it to + False by default and serialize data samples at last meanwhile + setting this attribute to True. + filter_empty_gt (bool): Whether to filter the data with empty GT. + If it's set to be True, the example with empty annotations after + data pipeline will be dropped and a random example will be chosen + in `__getitem__`. Defaults to True. + remove_dontcare (bool): Whether to remove objects that we do not care. + Defaults to False. + test_mode (bool): Whether the dataset is in test mode. + Defaults to False. + load_eval_anns (bool): Whether to load evaluation annotations. + Defaults to True. Only take effect when test_mode is True. + """ + # NOTE: category "step" -> "steps" to avoid potential naming conflicts in + # TensorboardVisBackend + METAINFO = { + 'classes': + ('adhesive tape', 'air conditioner', 'alarm', 'album', 'arch', + 'backpack', 'bag', 'balcony', 'ball', 'banister', 'bar', 'barricade', + 'baseboard', 'basin', 'basket', 'bathtub', 'beam', 'beanbag', 'bed', + 'bench', 'bicycle', 'bidet', 'bin', 'blackboard', 'blanket', 'blinds', + 'board', 'body loofah', 'book', 'boots', 'bottle', 'bowl', 'box', + 'bread', 'broom', 'brush', 'bucket', 'cabinet', 'calendar', 'camera', + 'can', 'candle', 'candlestick', 'cap', 'car', 'carpet', 'cart', + 'case', 'ceiling', 'chair', 'chandelier', 'cleanser', 'clock', + 'clothes', 'clothes dryer', 'coat hanger', 'coffee maker', 'coil', + 'column', 'commode', 'computer', 'conducting wire', 'container', + 'control', 'copier', 'cosmetics', 'couch', 'counter', 'countertop', + 'crate', 'crib', 'cube', 'cup', 'curtain', 'cushion', 'decoration', + 'desk', 'detergent', 'device', 'dish rack', 'dishwasher', 'dispenser', + 'divider', 'door', 'door knob', 'doorframe', 'doorway', 'drawer', + 'dress', 'dresser', 'drum', 'duct', 'dumbbell', 'dustpan', 'dvd', + 'eraser', 'excercise equipment', 'fan', 'faucet', 'fence', 'file', + 'fire extinguisher', 'fireplace', 'floor', 'flowerpot', 'flush', + 'folder', 'food', 'footstool', 'frame', 'fruit', 'furniture', + 'garage door', 'garbage', 'glass', 'globe', 'glove', 'grab bar', + 'grass', 'guitar', 'hair dryer', 'hamper', 'handle', 'hanger', 'hat', + 'headboard', 'headphones', 'heater', 'helmets', 'holder', 'hook', + 'humidifier', 'ironware', 'jacket', 'jalousie', 'jar', 'kettle', + 'keyboard', 'kitchen island', 'kitchenware', 'knife', 'label', + 'ladder', 'lamp', 'laptop', 'ledge', 'letter', 'light', 'luggage', + 'machine', 'magazine', 'mailbox', 'map', 'mask', 'mat', 'mattress', + 'menu', 'microwave', 'mirror', 'molding', 'monitor', 'mop', 'mouse', + 'napkins', 'notebook', 'object', 'ottoman', 'oven', 'pack', 'package', + 'pad', 'pan', 'panel', 'paper', 'paper cutter', 'partition', + 'pedestal', 'pen', 'person', 'piano', 'picture', 'pillar', 'pillow', + 'pipe', 'pitcher', 'plant', 'plate', 'player', 'plug', 'plunger', + 'pool', 'pool table', 'poster', 'pot', 'price tag', 'printer', + 'projector', 'purse', 'rack', 'radiator', 'radio', 'rail', + 'range hood', 'refrigerator', 'remote control', 'ridge', 'rod', + 'roll', 'roof', 'rope', 'sack', 'salt', 'scale', 'scissors', 'screen', + 'seasoning', 'shampoo', 'sheet', 'shelf', 'shirt', 'shoe', 'shovel', + 'shower', 'sign', 'sink', 'soap', 'soap dish', 'soap dispenser', + 'socket', 'speaker', 'sponge', 'spoon', 'stairs', 'stall', 'stand', + 'stapler', 'statue', 'steps', 'stick', 'stool', 'stopcock', 'stove', + 
'structure', 'sunglasses', 'support', 'switch', 'table', 'tablet', + 'teapot', 'telephone', 'thermostat', 'tissue', 'tissue box', + 'toaster', 'toilet', 'toilet paper', 'toiletry', 'tool', 'toothbrush', + 'toothpaste', 'towel', 'toy', 'tray', 'treadmill', 'trophy', 'tube', + 'tv', 'umbrella', 'urn', 'utensil', 'vacuum cleaner', 'vanity', + 'vase', 'vent', 'ventilation', 'wall', 'wardrobe', 'washbasin', + 'washing machine', 'water cooler', 'water heater', 'window', + 'window frame', 'windowsill', 'wine', 'wire', 'wood', 'wrap'), + 'valid_class_ids': + (1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, + 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, + 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, + 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, + 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, + 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, + 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, + 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, + 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, + 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, + 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, + 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 200, 201, 202, + 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214, 215, 216, + 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229, 230, + 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242, 243, 244, + 245, 246, 247, 248, 249, 250, 251, 252, 253, 254, 255, 256, 257, 258, + 259, 260, 261, 262, 263, 264, 265, 266, 267, 268, 269, 270, 271, 272, + 273, 274, 275, 276, 277, 278, 279, 280, 281, 282, 283, 284, 285, 286, + 287, 288) + } + + def __init__(self, + data_root: str, + ann_file: str, + vg_file: str, + metainfo: Optional[dict] = None, + pipeline: List[Union[dict, Callable]] = [], + box_type_3d: str = 'Euler-Depth', + serialize_data: bool = False, + filter_empty_gt: bool = True, + remove_dontcare: bool = False, + test_mode: bool = False, + load_eval_anns: bool = True, + tokens_positive_rebuild: bool = False, + **kwargs) -> None: + + if 'classes' in metainfo: + if metainfo['classes'] == 'all': + metainfo['classes'] = list(self.METAINFO['classes']) + + self.det3d_valid_id2label = np.zeros( + max(self.METAINFO['valid_class_ids']) + 1, dtype=np.int64) + for _ in range(self.det3d_valid_id2label.shape[0]): + self.det3d_valid_id2label[_] = -1 + for cls_idx, cat_id in enumerate(self.METAINFO['valid_class_ids']): + self.det3d_valid_id2label[cat_id] = cls_idx + + self.box_type_3d, self.box_mode_3d = get_box_type(box_type_3d) + self.filter_empty_gt = filter_empty_gt + self.remove_dontcare = remove_dontcare + self.load_eval_anns = load_eval_anns + self.tokens_positive_rebuild = tokens_positive_rebuild + + super().__init__(data_root=data_root, + ann_file=ann_file, + metainfo=metainfo, + pipeline=pipeline, + serialize_data=serialize_data, + test_mode=test_mode, + **kwargs) + + self.vg_file = osp.join(self.data_root, vg_file) + self.convert_info_to_scan() + self.data_list = self.load_language_data() + print(f'successfully loaded {len(self.data_list)} samples') + self.data_bytes, self.data_address = self._serialize_data() + self.serialize_data = True + + def process_metainfo(self): + """This function will be processed after metainfos 
from ann_file and + config are combined.""" + assert 'categories' in self._metainfo + + if 'classes' not in self._metainfo: + self._metainfo.setdefault( + 'classes', list(self._metainfo['categories'].keys())) + + self.label_mapping = np.full( + max(list(self._metainfo['categories'].values())) + 1, + -1, + dtype=int) + for key, value in self._metainfo['categories'].items(): + if key in self._metainfo['classes']: + self.label_mapping[value] = self._metainfo['classes'].index( + key) + + self.occ_label_mapping = np.full( + max(list(self._metainfo['categories'].values())) + 1, + -1, + dtype=int) + if 'occ_classes' in self._metainfo: + for idx, label_name in enumerate(self._metainfo['occ_classes']): + self.occ_label_mapping[self.metainfo['categories'][ + label_name]] = idx + 1 # 1-based, 0 is empty + + @staticmethod + def _get_axis_align_matrix(info: dict) -> np.ndarray: + """Get axis_align_matrix from info. If not exist, return identity mat. + + Args: + info (dict): Info of a single sample data. + + Returns: + np.ndarray: 4x4 transformation matrix. + """ + if 'axis_align_matrix' in info: + return np.array(info['axis_align_matrix']) + else: + warnings.warn( + 'axis_align_matrix is not found in ScanNet data info, please ' + 'use new pre-process scripts to re-generate ScanNet data') + return np.eye(4).astype(np.float32) + + # need to compensate the scan_id info to the original pkl file + def convert_info_to_scan(self): + self.scans = dict() + for data in self.data_list: + scan_id = data['sample_idx'] + data.update({'scan_id': scan_id}) + self.scans[scan_id] = data + print('number of true scans:', len(list(self.scans.keys()))) + + @staticmethod + def _is_view_dep(text): + """Check whether to augment based on sr3d utterance.""" + rels = [ + 'front', 'behind', 'back', 'left', 'right', 'facing', 'leftmost', + 'rightmost', 'looking', 'across' + ] + words = set(text.split()) + return any(rel in words for rel in rels) + + def load_data_list(self) -> List[dict]: + """Load annotations from an annotation file named as ``self.ann_file`` + + If the annotation file does not follow `OpenMMLab 2.0 format dataset + `_ . + The subclass must override this method for load annotations. The meta + information of annotation file will be overwritten :attr:`METAINFO` + and ``metainfo`` argument of constructor. + + Returns: + list[dict]: A list of annotation. + """ # noqa: E501 + # `self.ann_file` denotes the absolute annotation file path if + # `self.root=None` or relative path if `self.root=/path/to/data/`. + annotations = load(self.ann_file) + if not isinstance(annotations, dict): + raise TypeError(f'The annotations loaded from annotation file ' + f'should be a dict, but got {type(annotations)}!') + if 'data_list' not in annotations or 'metainfo' not in annotations: + raise ValueError('Annotation must have data_list and metainfo ' + 'keys') + metainfo = annotations['metainfo'] + raw_data_list = annotations['data_list'] + + # Meta information load from annotation file will not influence the + # existed meta information load from `BaseDataset.METAINFO` and + # `metainfo` arguments defined in constructor. + for k, v in metainfo.items(): + self._metainfo.setdefault(k, v) + + self.process_metainfo() + + # load and parse data_infos. 
+ data_list = [] + for raw_data_info in raw_data_list: + # parse raw data information to target format + data_info = self.parse_data_info(raw_data_info) + if isinstance(data_info, dict): + # For image tasks, `data_info` should information if single + # image, such as dict(img_path='xxx', width=360, ...) + data_list.append(data_info) + elif isinstance(data_info, list): + # For video tasks, `data_info` could contain image + # information of multiple frames, such as + # [dict(video_path='xxx', timestamps=...), + # dict(video_path='xxx', timestamps=...)] + for item in data_info: + if not isinstance(item, dict): + raise TypeError('data_info must be list of dict, but ' + f'got {type(item)}') + data_list.extend(data_info) + else: + raise TypeError('data_info should be a dict or list of dict, ' + f'but got {type(data_info)}') + + return data_list + + def load_language_data(self): + # load the object-level annotations + language_annotations = load(self.vg_file) + # language_infos = [ + # { + # 'scan_id': anno['scan_id'], + # 'text': anno['text'], + # 'target_id': int(anno['target_id']), (training) + # 'distractor_ids': anno['distractor_ids'], (training) + # 'tokens_positive': anno['tokens_positive'] (training) + # } + # for anno in language_annotations + # ] + # According to each object annotation, + # find all objects in the corresponding scan + language_infos = [] + print('number of scans loaded from info data:', + len(list(self.scans.keys()))) + num_dropped_by_not_scan_id = 0 + num_dropped_by_multiple = 0 + for anno in mmengine.track_iter_progress(language_annotations): + language_info = dict() + language_info.update({ + 'scan_id': anno['scan_id'], + 'text': anno['text'] + }) + data = self.scans.get(language_info['scan_id'], {}) + language_info['scan_id'] = anno['scan_id'] + + ann_info = data.get('ann_info', {}) + + # save the bounding boxes and corresponding labels + language_anno_info = dict() + language_anno_info['is_view_dep'] = self._is_view_dep( + language_info['text']) + labels = ann_info.get('gt_labels_3d', + np.zeros(0)) # all box labels in the scan + bboxes = ann_info.get('gt_bboxes_3d', np.zeros( + (0, 9))) # BaseInstanceBboxes + if 'target_id' in anno: # w/ ground truths + language_info.update({'target_id': anno['target_id']}) + # obtain all objects sharing the same category with + # the target object, the num of such objects <= 32 + object_ids = ann_info['bbox_id'] # numpy array + if isinstance(anno['target_id'], int): + object_ind = np.where( + object_ids == language_info['target_id'])[0] + if len(object_ind) != 1: + num_dropped_by_multiple += 1 + continue + language_anno_info['gt_bboxes_3d'] = bboxes[object_ind] + language_anno_info['gt_labels_3d'] = labels[object_ind] + if 'tokens_positive' in anno: + # to be removed after the info being updated + if self.tokens_positive_rebuild: + anno['tokens_positive'] = [[ + anno['text'].find(part), + anno['text'].find(part) + len(part) + ] for part in anno['target'].split()] + language_info['tokens_positive'] = [ + anno['tokens_positive'] + ] + elif isinstance(anno['target_id'], List): + object_indices = [] + keep_indices = [] + is_mapping_unique = True + for idx, target_id in enumerate( + language_info['target_id']): + assert isinstance(target_id, int) + object_ind = np.where(object_ids == target_id)[0] + if len(object_ind) != 1: + is_mapping_unique = False + break + keep_indices.append(idx) + object_indices.append(object_ind[0]) + + if not is_mapping_unique: + num_dropped_by_multiple += 1 + continue + else: + 
language_anno_info['gt_bboxes_3d'] = bboxes[ + object_indices] + language_anno_info['gt_labels_3d'] = labels[ + object_indices] + if 'tokens_positive' in anno: + language_info['tokens_positive'] = [[ + anno['tokens_positive'][idx] + ] for idx in keep_indices] + else: + raise NotImplementedError + # include other optional keys + optional_keys = ['distractor_ids'] + for key in optional_keys: + if key in anno: + language_info.update({key: anno[key]}) + # the 'distractor_ids' starts from 1, not 0 + language_anno_info['is_hard'] = len( + language_info['distractor_ids'] + ) > 3 # more than three distractors + language_anno_info['is_unique'] = len( + language_info['distractor_ids']) == 0 + + sub_class = anno.get('sub_class', '').lower() + if sub_class: + space = 'space' in sub_class or 'or' in sub_class + attribute = 'attribute' in sub_class or 'eq' in sub_class + assert space + attribute == 1, f'Invalid sub_class {sub_class}, should be space or attribute' + indirect = 'indirect' in sub_class + direct = not indirect + # assert "direct" in sub_class, f"Invalid sub_class {sub_class}, should contain the word direct" + language_anno_info['space'] = space + language_anno_info['direct'] = direct + language_anno_info['sub_class'] = sub_class + else: + language_anno_info['space'] = False + language_anno_info['direct'] = False + else: + # inference w/o gt, assign the placeholder gt_boxes and labels + language_anno_info['gt_bboxes_3d'] = bboxes + language_anno_info['gt_labels_3d'] = labels + # placeholder value for 'is_hard' and 'is_unique' + language_anno_info['is_hard'] = False + language_anno_info['is_unique'] = False + language_anno_info['space'] = False + language_anno_info['direct'] = False + + if not self.test_mode: + language_info['ann_info'] = language_anno_info + + if self.test_mode and self.load_eval_anns: + language_info['ann_info'] = language_anno_info + language_info['eval_ann_info'] = language_info['ann_info'] + + language_infos.append(language_info) + + del self.scans + print('dropped by false scan id', num_dropped_by_not_scan_id) + print('dropped by multiple indices', num_dropped_by_multiple) + return language_infos + + def parse_data_info(self, info: dict) -> dict: + """Process the raw data info. + + The only difference with it in `Det3DDataset` + is the specific process for `axis_align_matrix'. + + Args: + info (dict): Raw info dict. + + Returns: + dict: Has `ann_info` in training stage. And + all path has been converted to absolute path. 
+ """ + info['box_type_3d'] = self.box_type_3d + info['axis_align_matrix'] = self._get_axis_align_matrix(info) + # Because multi-view settings are different from original designs + # we temporarily follow the ori design in ImVoxelNet + # info['img_path'] = [] + # info['depth_img_path'] = [] + # info['scan_id'] = info['sample_idx'] + # ann_dataset = info['sample_idx'].split('/')[0] + # if ann_dataset == 'matterport3d': + # info['depth_shift'] = 4000.0 + # else: + # info['depth_shift'] = 1000.0 + + # if 'cam2img' in info: + # cam2img = info['cam2img'].astype(np.float32) + # else: + # cam2img = [] + + # extrinsics = [] + # for i in range(len(info['images'])): + # img_path = os.path.join(self.data_prefix.get('img_path', ''), + # info['images'][i]['img_path']) + # depth_img_path = os.path.join(self.data_prefix.get('img_path', ''), + # info['images'][i]['depth_path']) + + # info['img_path'].append(img_path) + # info['depth_img_path'].append(depth_img_path) + # align_global2cam = np.linalg.inv( + # info['axis_align_matrix'] @ info['images'][i]['cam2global']) + # extrinsics.append(align_global2cam.astype(np.float32)) + # if 'cam2img' not in info: + # cam2img.append(info['images'][i]['cam2img'].astype(np.float32)) + + # info['depth2img'] = dict(extrinsic=extrinsics, + # intrinsic=cam2img, + # origin=np.array([.0, .0, + # .5]).astype(np.float32)) + + # if 'depth_cam2img' not in info: + # info['depth_cam2img'] = cam2img + + if not self.test_mode: + info['ann_info'] = self.parse_ann_info(info) + if self.test_mode and self.load_eval_anns: + info['ann_info'] = self.parse_ann_info(info) + info['eval_ann_info'] = info['ann_info'] + return info + + def parse_ann_info(self, info: dict) -> dict: + """Process the `instances` in data info to `ann_info`. + + Args: + info (dict): Info dict. + + Returns: + dict: Processed `ann_info`. 
+ """ + for instance in info['instances']: + if instance['bbox_label_3d'] < self.det3d_valid_id2label.shape[0]: + value = self.det3d_valid_id2label[instance['bbox_label_3d']] + if value < 0: + raise Exception('Class out of range') + instance['bbox_label_3d'] = value + else: + raise Exception('Class out of range') + + # ann_info = None + # if 'instances' in info and len(info['instances']) > 0: + # ann_info = dict( + # gt_bboxes_3d=np.zeros((len(info['instances']), 9), + # dtype=np.float32), + # gt_labels_3d=np.zeros((len(info['instances']), ), + # dtype=np.int64), + # ) + # for idx, instance in enumerate(info['instances']): + # ann_info['gt_bboxes_3d'][idx] = instance['bbox_3d'] + # ann_info['gt_labels_3d'][idx] = self.label_mapping[ + # instance['bbox_label_3d']] + + # add s or gt prefix for most keys after concat + # we only process 3d annotations here, the corresponding + # 2d annotation process is in the `LoadAnnotations3D` + # in `transforms` + name_mapping = { + 'bbox_label_3d': 'gt_labels_3d', + 'bbox_label': 'gt_bboxes_labels', + 'bbox': 'gt_bboxes', + 'bbox_3d': 'gt_bboxes_3d', + 'depth': 'depths', + 'center_2d': 'centers_2d', + 'attr_label': 'attr_labels', + 'velocity': 'velocities', + } + instances = info['instances'] + # empty gt + if len(instances) == 0: + return None + else: + keys = list(instances[0].keys()) + ann_info = dict() + for ann_name in keys: + temp_anns = [item[ann_name] for item in instances] + # map the original dataset label to training label + if 'label' in ann_name and ann_name != 'attr_label': + temp_anns = [ + self.label_mapping[item] for item in temp_anns + ] + if ann_name in name_mapping: + mapped_ann_name = name_mapping[ann_name] + else: + mapped_ann_name = ann_name + + if 'label' in ann_name: + temp_anns = np.array(temp_anns).astype(np.int64) + elif ann_name in name_mapping: + temp_anns = np.array(temp_anns).astype(np.float32) + else: + temp_anns = np.array(temp_anns) + + ann_info[mapped_ann_name] = temp_anns + ann_info['instances'] = info['instances'] + + if ann_info is None: + ann_info = dict() + ann_info['gt_bboxes_3d'] = np.zeros((0, 9), dtype=np.float32) + ann_info['gt_labels_3d'] = np.zeros((0, ), dtype=np.int64) + + ann_info['gt_bboxes_3d'] = self.box_type_3d( + ann_info['gt_bboxes_3d'], + box_dim=ann_info['gt_bboxes_3d'].shape[-1], + with_yaw=True, + origin=(0.5, 0.5, 0.5)) + + return ann_info diff --git a/embodiedscan/datasets/transforms/__init__.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/__init__.py similarity index 66% rename from embodiedscan/datasets/transforms/__init__.py rename to models/EmbodiedScan/embodiedscan/datasets/transforms/__init__.py index 4dac9ad..963c94e 100644 --- a/embodiedscan/datasets/transforms/__init__.py +++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/__init__.py @@ -1,12 +1,16 @@ from .augmentation import GlobalRotScaleTrans, RandomFlip3D +from .default import DefaultPipeline from .formatting import Pack3DDetInputs from .loading import LoadAnnotations3D, LoadDepthFromFile from .multiview import ConstructMultiSweeps, MultiViewPipeline +from .pointcloud import PointCloudPipeline +from .pointcloud_demo import PointCloudPipelineDemo from .points import ConvertRGBDToPoints, PointSample, PointsRangeFilter __all__ = [ 'RandomFlip3D', 'GlobalRotScaleTrans', 'Pack3DDetInputs', 'LoadDepthFromFile', 'LoadAnnotations3D', 'MultiViewPipeline', 'ConstructMultiSweeps', 'ConvertRGBDToPoints', 'PointSample', - 'PointsRangeFilter' + 'PointCloudPipeline', 'PointsRangeFilter', 'PointCloudPipeline', + 
'PointCloudPipelineDemo', 'DefaultPipeline' ] diff --git a/embodiedscan/datasets/transforms/augmentation.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/augmentation.py similarity index 99% rename from embodiedscan/datasets/transforms/augmentation.py rename to models/EmbodiedScan/embodiedscan/datasets/transforms/augmentation.py index 512b5b7..17cfd3b 100644 --- a/embodiedscan/datasets/transforms/augmentation.py +++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/augmentation.py @@ -1,11 +1,10 @@ from typing import List, Union import numpy as np +from embodiedscan.registry import TRANSFORMS from mmcv.transforms import BaseTransform from mmdet.datasets.transforms import RandomFlip -from embodiedscan.registry import TRANSFORMS - @TRANSFORMS.register_module() class RandomFlip3D(RandomFlip): diff --git a/models/EmbodiedScan/embodiedscan/datasets/transforms/default.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/default.py new file mode 100644 index 0000000..18f9952 --- /dev/null +++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/default.py @@ -0,0 +1,55 @@ +import json +import os + +import numpy as np +import torch +from embodiedscan.registry import TRANSFORMS +from embodiedscan.structures.points import DepthPoints, get_points_type +from lry_utils.utils_read import NUM2RAW_3RSCAN, to_sample_idx, to_scene_id +from mmcv.transforms import BaseTransform, Compose + + +@TRANSFORMS.register_module() +class DefaultPipeline(BaseTransform): + """Multiview data processing pipeline. + + The transform steps are as follows: + + 1. Select frames. + 2. Re-ororganize the selected data structure. + 3. Apply transforms for each selected frame. + 4. Concatenate data to form a batch. + + Args: + transforms (list[dict | callable]): + The transforms to be applied to each select frame. + n_images (int): Number of frames selected per scene. + ordered (bool): Whether to put these frames in order. + Defaults to False. + """ + + def __init__(self, ordered=False, keep_rgb=True): + super().__init__() + + def transform(self, results: dict) -> dict: + """Transform function. + + Args: + results (dict): Result dict from loading pipeline. + + Returns: + dict: output dict after transformation. 
+ """ + # adding pcd info + pc, color, _, _ = torch.load(results['pc_file']) + + points = np.concatenate([pc, color], axis=-1) + points = DepthPoints(points, + points_dim=6, + attribute_dims=dict(color=[ + points.shape[1] - 3, + points.shape[1] - 2, + points.shape[1] - 1, + ])) + results['points'] = points + return results diff --git a/embodiedscan/datasets/transforms/formatting.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/formatting.py similarity index 98% rename from embodiedscan/datasets/transforms/formatting.py rename to models/EmbodiedScan/embodiedscan/datasets/transforms/formatting.py index ee7e8d5..23c21e7 100644 --- a/embodiedscan/datasets/transforms/formatting.py +++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/formatting.py @@ -3,13 +3,12 @@ import mmengine import numpy as np import torch -from mmcv.transforms import BaseTransform -from mmengine.structures import InstanceData, PixelData - from embodiedscan.registry import TRANSFORMS from embodiedscan.structures.bbox_3d import BaseInstance3DBoxes from embodiedscan.structures.points import BasePoints from embodiedscan.utils.typing_config import Det3DDataElement, PointData +from mmcv.transforms import BaseTransform +from mmengine.structures import InstanceData, PixelData def to_tensor( @@ -46,7 +45,7 @@ def to_tensor( @TRANSFORMS.register_module() class Pack3DDetInputs(BaseTransform): - INPUTS_KEYS = ['points', 'img'] + INPUTS_KEYS = ['points', 'img', 'sample_idx_mmscan'] # to be compatible with depths in bevdepth INSTANCEDATA_3D_KEYS = [ 'gt_bboxes_3d', 'gt_labels_3d', 'attr_labels', 'depths', 'centers_2d' @@ -274,6 +273,8 @@ def pack_single_results(self, results: dict) -> dict: data_sample.gt_depth_map = gt_depth_map if 'eval_ann_info' in results: + results['eval_ann_info']['sample_idx_mmscan'] = inputs.get( + 'sample_idx_mmscan') data_sample.eval_ann_info = results['eval_ann_info'] else: data_sample.eval_ann_info = None @@ -281,6 +282,7 @@ def pack_single_results(self, results: dict) -> dict: packed_results = dict() packed_results['data_samples'] = data_sample packed_results['inputs'] = inputs + # assert packed_results['inputs']['sample_idx_mmscan'] # lry mod return packed_results def __repr__(self) -> str: diff --git a/embodiedscan/datasets/transforms/loading.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/loading.py similarity index 99% rename from embodiedscan/datasets/transforms/loading.py rename to models/EmbodiedScan/embodiedscan/datasets/transforms/loading.py index b13baed..8b2f8ba 100644 --- a/embodiedscan/datasets/transforms/loading.py +++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/loading.py @@ -3,11 +3,10 @@ import mmcv import mmengine import numpy as np +from embodiedscan.registry import TRANSFORMS from mmcv.transforms import BaseTransform from mmdet.datasets.transforms import LoadAnnotations -from embodiedscan.registry import TRANSFORMS - @TRANSFORMS.register_module() class LoadDepthFromFile(BaseTransform): diff --git a/embodiedscan/datasets/transforms/multiview.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/multiview.py similarity index 99% rename from embodiedscan/datasets/transforms/multiview.py rename to models/EmbodiedScan/embodiedscan/datasets/transforms/multiview.py index 6e70d22..d146fc1 100644 --- a/embodiedscan/datasets/transforms/multiview.py +++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/multiview.py @@ -1,9 +1,8 @@ import numpy as np import torch -from mmcv.transforms import BaseTransform, Compose - from embodiedscan.registry import 
TRANSFORMS
 from embodiedscan.structures.points import get_points_type
+from mmcv.transforms import BaseTransform, Compose


 @TRANSFORMS.register_module()
diff --git a/models/EmbodiedScan/embodiedscan/datasets/transforms/pointcloud.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/pointcloud.py
new file mode 100644
index 0000000..8b73dd8
--- /dev/null
+++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/pointcloud.py
@@ -0,0 +1,62 @@
+import json
+import os
+
+import numpy as np
+import torch
+from embodiedscan.registry import TRANSFORMS
+from embodiedscan.structures.points import DepthPoints, get_points_type
+from lry_utils.utils_read import NUM2RAW_3RSCAN, to_sample_idx, to_scene_id
+from mmcv.transforms import BaseTransform, Compose
+
+
+@TRANSFORMS.register_module()
+class PointCloudPipeline(BaseTransform):
+    """Point cloud loading pipeline.
+
+    Resolves the scan id to a 3RScan-style scene id and loads the
+    globally aligned point cloud from disk, attaching it to
+    ``results['points']``.
+
+    Args:
+        ordered (bool): Kept for interface compatibility; currently
+            unused. Defaults to False.
+        keep_rgb (bool): Whether to append the per-point RGB values to
+            the xyz coordinates. Defaults to True.
+    """
+
+    def __init__(self, ordered=False, keep_rgb=True):
+        super().__init__()
+        self.ordered = ordered
+        self.keep_rgb = keep_rgb
+
+    def transform(self, results: dict) -> dict:
+        """Transform function.
+
+        Args:
+            results (dict): Result dict from loading pipeline.
+
+        Returns:
+            dict: Output dict after transformation.
+        """
+        scene_id = to_scene_id(results['scan_id'])
+        scene_id = NUM2RAW_3RSCAN.get(scene_id, scene_id)
+        pcd_path = f'/mnt/hwfile/OpenRobotLab/lvruiyuan/pcd_data/pcd_with_global_alignment/{scene_id}.pth'
+        pc, color, label, instance_ids = torch.load(pcd_path)
+        if self.keep_rgb:
+            points = np.concatenate([pc, color], axis=-1)
+            points = DepthPoints(points,
+                                 points_dim=6,
+                                 attribute_dims=dict(color=[
+                                     points.shape[1] - 3,
+                                     points.shape[1] - 2,
+                                     points.shape[1] - 1,
+                                 ]))
+        else:
+            points = DepthPoints(pc)
+        _results = {'points': points}
+        results.update(_results)
+        return results
diff --git a/models/EmbodiedScan/embodiedscan/datasets/transforms/pointcloud_demo.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/pointcloud_demo.py
new file mode 100644
index 0000000..6931e6d
--- /dev/null
+++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/pointcloud_demo.py
@@ -0,0 +1,64 @@
+import json
+import os
+
+import numpy as np
+import torch
+from embodiedscan.registry import TRANSFORMS
+from embodiedscan.structures.points import DepthPoints, get_points_type
+from lry_utils.utils_read import NUM2RAW_3RSCAN, to_sample_idx, to_scene_id
+from mmcv.transforms import BaseTransform, Compose
+
+
+@TRANSFORMS.register_module()
+class PointCloudPipelineDemo(BaseTransform):
+    """Point cloud loading pipeline for the demo scans.
+
+    Loads one of the bundled demo point clouds ('office', 'restroom',
+    'restroom2') from an ``.npy`` file and attaches it to
+    ``results['points']``.
+
+    Args:
+        ordered (bool): Kept for interface compatibility; currently
+            unused. Defaults to False.
+        keep_rgb (bool): Whether to append the per-point RGB values to
+            the xyz coordinates. Defaults to True.
+    """
+
+    def __init__(self, ordered=False, keep_rgb=True):
+        super().__init__()
+        self.ordered = ordered
+        self.keep_rgb = keep_rgb
+
+    def transform(self, results: dict) -> dict:
+        """Transform function.
+
+        Args:
+            results (dict): Result dict from loading pipeline.
+
+        Returns:
+            dict: Output dict after transformation.
+        """
+        scene_id = results['scan_id']
+        assert scene_id in ['office', 'restroom', 'restroom2'], scene_id
+        pcd_path = f'/mnt/petrelfs/lvruiyuan/repos/EmbodiedScan/data/open_scan/{scene_id}.npy'
+        pc_with_color = np.load(pcd_path)
+        pc, color = pc_with_color[:, :3], pc_with_color[:, 3:]
+        color = color / 255
+        if self.keep_rgb:
+            points = np.concatenate([pc, color], axis=-1)
+            points = DepthPoints(points,
+                                 points_dim=6,
+                                 attribute_dims=dict(color=[
+                                     points.shape[1] - 3,
+                                     points.shape[1] - 2,
+                                     points.shape[1] - 1,
+                                 ]))
+        else:
+            points = DepthPoints(pc)
+        _results = {'points': points}
+        results.update(_results)
+        return results
diff --git a/embodiedscan/datasets/transforms/points.py b/models/EmbodiedScan/embodiedscan/datasets/transforms/points.py
similarity index 99%
rename from embodiedscan/datasets/transforms/points.py
rename to models/EmbodiedScan/embodiedscan/datasets/transforms/points.py
index c65398e..6feee7b 100644
--- a/embodiedscan/datasets/transforms/points.py
+++ b/models/EmbodiedScan/embodiedscan/datasets/transforms/points.py
@@ -1,11 +1,10 @@
 from typing import List, Optional, Tuple, Union

 import numpy as np
-from mmcv.transforms import BaseTransform
-
 from embodiedscan.registry import TRANSFORMS
 from embodiedscan.structures.bbox_3d import points_cam2img, points_img2cam
 from embodiedscan.structures.points import BasePoints, get_points_type
+from mmcv.transforms import BaseTransform


 @TRANSFORMS.register_module()
diff --git a/embodiedscan/eval/__init__.py b/models/EmbodiedScan/embodiedscan/eval/__init__.py
similarity index 100%
rename from embodiedscan/eval/__init__.py
rename to models/EmbodiedScan/embodiedscan/eval/__init__.py
diff --git a/embodiedscan/eval/indoor_eval.py b/models/EmbodiedScan/embodiedscan/eval/indoor_eval.py
similarity index 99%
rename from embodiedscan/eval/indoor_eval.py
rename to models/EmbodiedScan/embodiedscan/eval/indoor_eval.py
index 7ec1f48..d4a9e9a 100644
--- a/embodiedscan/eval/indoor_eval.py
+++ b/models/EmbodiedScan/embodiedscan/eval/indoor_eval.py
@@ -26,6 +26,7 @@ def average_precision(recalls, precisions, mode='area'):

     assert recalls.shape == precisions.shape
     assert recalls.ndim == 2
+    print('the shape of prediction is', recalls.shape)

     num_scales = recalls.shape[0]
     ap = np.zeros(num_scales, dtype=np.float32)
diff --git a/models/EmbodiedScan/embodiedscan/eval/metrics/__init__.py b/models/EmbodiedScan/embodiedscan/eval/metrics/__init__.py
new file mode 100644
index 0000000..c412896
--- /dev/null
+++ b/models/EmbodiedScan/embodiedscan/eval/metrics/__init__.py
@@ -0,0 +1,9 @@
+from .det_metric import IndoorDetMetric
+from .grounding_metric import GroundingMetric
+from .grounding_metric_mod import GroundingMetricMod
+from .occupancy_metric import OccupancyMetric
+
+__all__ = [
+    'IndoorDetMetric', 'OccupancyMetric', 'GroundingMetric',
+    'GroundingMetricMod'
+]
diff --git a/embodiedscan/eval/metrics/det_metric.py b/models/EmbodiedScan/embodiedscan/eval/metrics/det_metric.py
similarity index 90%
rename from embodiedscan/eval/metrics/det_metric.py
rename to models/EmbodiedScan/embodiedscan/eval/metrics/det_metric.py
index 302f496..3f03234 100644
--- a/embodiedscan/eval/metrics/det_metric.py
+++
b/models/EmbodiedScan/embodiedscan/eval/metrics/det_metric.py @@ -3,7 +3,10 @@ from collections import OrderedDict from typing import Dict, List, Optional, Sequence, Union +import mmengine import numpy as np +from embodiedscan.registry import METRICS +from embodiedscan.structures import get_box_type from mmdet.evaluation import eval_map from mmengine.dist import (broadcast_object_list, collect_results, is_main_process) @@ -11,9 +14,6 @@ from mmengine.evaluator.metric import _to_cpu from mmengine.logging import MMLogger, print_log -from embodiedscan.registry import METRICS -from embodiedscan.structures import get_box_type - from ..indoor_eval import indoor_eval @@ -38,11 +38,13 @@ def __init__(self, collect_device: str = 'cpu', prefix: Optional[str] = None, batchwise_anns: bool = False, + format_only: bool = False, **kwargs) -> None: super(IndoorDetMetric, self).__init__(prefix=prefix, collect_device=collect_device) self.iou_thr = [iou_thr] if isinstance(iou_thr, float) else iou_thr self.batchwise_anns = batchwise_anns + self.format_only = format_only def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None: """Process one batch of data samples and predictions. @@ -83,6 +85,29 @@ def compute_metrics(self, results: list) -> Dict[str, float]: ann_infos.append(eval_ann) pred_results.append(sinlge_pred_results) + if self.format_only: + print_log('saving prediction results') + # import pdb; pdb.set_trace() + save = [] + for ann, pred in results: + try: + result = {} + pred_boxes = pred['bboxes_3d'].tensor + scores = pred['scores_3d'] + labels = pred['labels_3d'] + result['sample_idx'] = ann['sample_idx_mmscan'] + result['pred_boxes'] = pred_boxes.numpy() + result['scores'] = scores.numpy() + result['labels'] = labels.numpy() + save.append(result) + except Exception as e: + print(e) + continue + mmengine.dump( + save, + '/mnt/petrelfs/lvruiyuan/repos/EmbodiedScan/work_dirs/det_predictions/pred_results_aaa.json' + ) + return {} # some checkpoints may not record the key "box_type_3d" box_type_3d, box_mode_3d = get_box_type( self.dataset_meta.get('box_type_3d', 'depth')) diff --git a/embodiedscan/eval/metrics/grounding_metric.py b/models/EmbodiedScan/embodiedscan/eval/metrics/grounding_metric.py similarity index 99% rename from embodiedscan/eval/metrics/grounding_metric.py rename to models/EmbodiedScan/embodiedscan/eval/metrics/grounding_metric.py index 5168b50..837d318 100644 --- a/embodiedscan/eval/metrics/grounding_metric.py +++ b/models/EmbodiedScan/embodiedscan/eval/metrics/grounding_metric.py @@ -3,13 +3,12 @@ from typing import Dict, List, Optional, Sequence import mmengine +from embodiedscan.registry import METRICS +from embodiedscan.structures import EulerDepthInstance3DBoxes from mmengine.evaluator import BaseMetric from mmengine.logging import MMLogger, print_log from terminaltables import AsciiTable -from embodiedscan.registry import METRICS -from embodiedscan.structures import EulerDepthInstance3DBoxes - @METRICS.register_module() class GroundingMetric(BaseMetric): diff --git a/models/EmbodiedScan/embodiedscan/eval/metrics/grounding_metric_mod.py b/models/EmbodiedScan/embodiedscan/eval/metrics/grounding_metric_mod.py new file mode 100644 index 0000000..100ead4 --- /dev/null +++ b/models/EmbodiedScan/embodiedscan/eval/metrics/grounding_metric_mod.py @@ -0,0 +1,181 @@ +# Copyright (c) OpenRobotLab. All rights reserved. 
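A note on the `format_only` branch added to `IndoorDetMetric` above: it dumps a list of per-sample dicts (`sample_idx`, `pred_boxes`, `scores`, `labels`) via `mmengine.dump`. A hedged sketch of how that dump could be inspected afterwards; the path is simply the hard-coded one from the hunk above, and the 0.3 confidence cut is illustrative:

```python
import mmengine
import numpy as np

# Path hard-coded in the format_only branch above.
results = mmengine.load(
    '/mnt/petrelfs/lvruiyuan/repos/EmbodiedScan/work_dirs/det_predictions/pred_results_aaa.json')

for res in results[:3]:
    boxes = np.asarray(res['pred_boxes'])   # (N, 9) 9-DoF boxes
    scores = np.asarray(res['scores'])      # (N,)
    labels = np.asarray(res['labels'])      # (N,)
    keep = scores > 0.3                     # hypothetical confidence cut
    print(res['sample_idx'], boxes[keep].shape)
```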
+import os
+from typing import Dict, List, Optional, Sequence
+
+import mmengine
+import numpy as np
+import torch
+from embodiedscan.registry import METRICS
+from embodiedscan.structures import EulerDepthInstance3DBoxes
+from mmengine.evaluator import BaseMetric
+from mmengine.logging import MMLogger, print_log
+from scipy.optimize import linear_sum_assignment
+from terminaltables import AsciiTable
+from tqdm import tqdm
+
+from mmscan import VisualGroundingEvaluator
+
+
+def abbr(sub_class):
+    sub_class = sub_class.lower()
+    sub_class = sub_class.replace('single', 'sngl')
+    sub_class = sub_class.replace('inter', 'int')
+    sub_class = sub_class.replace('unique', 'uniq')
+    sub_class = sub_class.replace('common', 'cmn')
+    sub_class = sub_class.replace('attribute', 'attr')
+    if 'sngl' in sub_class and ('attr' in sub_class or 'eq' in sub_class):
+        sub_class = 'vg_sngl_attr'
+    return sub_class
+
+
+@METRICS.register_module()
+class GroundingMetricMod(BaseMetric):
+    """Language grounding evaluation metric. We calculate the grounding
+    performance based on the alignment score of each bbox with the input
+    prompt.
+
+    Args:
+        iou_thr (float or List[float]): IoU thresholds used when calculating
+            the metric. Defaults to [0.25, 0.5].
+        collect_device (str): Device name used for collecting results from
+            different ranks during distributed training. Must be 'cpu' or
+            'gpu'. Defaults to 'cpu'.
+        prefix (str, optional): The prefix that will be added in the metric
+            names to disambiguate homonymous metrics of different evaluators.
+            If prefix is not provided in the argument, self.default_prefix will
+            be used instead. Defaults to None.
+        format_only (bool): Whether to only run inference on the predictions
+            without evaluation. Defaults to False.
+        result_dir (str): Dir to save results, e.g., if result_dir = './',
+            the result file will be './test_results.json'. Defaults to ''.
+    """
+
+    def __init__(self,
+                 iou_thr: List[float] = [0.25, 0.5],
+                 collect_device: str = 'cpu',
+                 prefix: Optional[str] = None,
+                 format_only=False,
+                 result_dir='') -> None:
+        super(GroundingMetricMod, self).__init__(prefix=prefix,
+                                                 collect_device=collect_device)
+        self.iou_thr = [iou_thr] if isinstance(iou_thr, float) else iou_thr
+        self.prefix = prefix
+        self.format_only = format_only
+        self.result_dir = result_dir
+        self.mmscan_eval = VisualGroundingEvaluator(True)
+
+    def to_mmscan_form(self, det_annos, gt_annos):
+        batch_input = []
+        for i, (gt_anno, det_anno) in tqdm(enumerate(zip(gt_annos,
+                                                         det_annos))):
+
+            _input = {}
+            _input['pred_scores'] = det_anno['target_scores_3d']
+            _input['pred_bboxes'] = det_anno['bboxes_3d']
+
+            _input['gt_bboxes'] = gt_anno['gt_bboxes_3d']
+            _input['subclass'] = gt_anno['sub_class']
+            _input['pred_bboxes'] = torch.stack([euler_box for euler_box in _input['pred_bboxes']])\
+                if len(_input['pred_bboxes']) > 0 else torch.empty(0, 9)
+
+            _input['gt_bboxes'] = torch.stack([euler_box for euler_box in _input['gt_bboxes']]) \
+                if len(_input['gt_bboxes']) > 0 else torch.empty(0, 9)
+
+            batch_input.append(_input)
+
+        return batch_input
+
+    def process(self, data_batch: dict, data_samples: Sequence[dict]) -> None:
+        """Process one batch of data samples and predictions.
+
+        The processed results should be stored in ``self.results``, which will
+        be used to compute the metrics when all batches have been processed.
+
+        Args:
+            data_batch (dict): A batch of data from the dataloader.
+            data_samples (Sequence[dict]): A batch of outputs from the model.
+        """
+        for data_sample in data_samples:
+            pred_3d = data_sample['pred_instances_3d']
+            eval_ann_info = data_sample['eval_ann_info']
+            cpu_pred_3d = dict()
+            for k, v in pred_3d.items():
+                if hasattr(v, 'to'):
+                    cpu_pred_3d[k] = v.to('cpu')
+                else:
+                    cpu_pred_3d[k] = v
+            self.results.append((eval_ann_info, cpu_pred_3d))
+
+    def ground_eval(self, gt_annos, det_annos, logger=None):
+
+        assert len(det_annos) == len(gt_annos)
+
+        metric_results = {}
+
+        batch_input = self.to_mmscan_form(det_annos, gt_annos)
+
+        self.mmscan_eval.reset()
+
+        self.mmscan_eval.update(batch_input)
+
+        print('Starting evaluation!')
+        self.mmscan_eval.start_evaluation()
+
+        result_table = self.mmscan_eval.print_result()
+
+        print_log('\n' + result_table, logger=logger)
+        return metric_results
+
+    def compute_metrics(self, results: list) -> Dict[str, float]:
+        """Compute the metrics from processed results after all batches have
+        been processed.
+
+        Args:
+            results (list): The processed results of each batch.
+
+        Returns:
+            Dict[str, float]: The computed metrics. The keys are the names of
+            the metrics, and the values are corresponding results.
+        """
+        logger: MMLogger = MMLogger.get_current_instance()  # noqa
+        annotations, preds = zip(*results)
+        ret_dict = {}
+        # if self.format_only:
+        #     # preds is a list of dict
+        #     results = []
+        #     print_log("If you see this, you are in function: compute metrics with format only.")
+        #     for pred in preds:
+        #         result = dict()
+        #         # convert the Euler boxes to the numpy array to save
+        #         bboxes_3d = pred['bboxes_3d'].tensor
+        #         scores_3d = pred['scores_3d']
+        #         # Note: hard-code save top-20 predictions
+        #         # eval top-10 predictions during the test phase by default
+        #         box_index = scores_3d.argsort(dim=-1, descending=True)
+        #         top_bboxes_3d = bboxes_3d[box_index]
+        #         top_scores_3d = scores_3d[box_index]
+        #         result['bboxes_3d'] = top_bboxes_3d.numpy()
+        #         result['scores_3d'] = top_scores_3d.numpy()
+        #         results.append(result)
+        #     mmengine.dump(results,
+        #                   os.path.join(self.result_dir, 'test_results.json'))
+        #     return ret_dict
+        # try:
+        #     torch.save({"pred_list":preds,"gt_list":annotations},"/mnt/petrelfs/linjingli/tmp/data/big_tmp/result_og_es2.pt")
+        # except:
+        #     print("saving fail")
+        ret_dict = self.ground_eval(annotations, preds)
+
+        return ret_dict
+
+
+if __name__ == '__main__':
+
+    result_file = torch.load(
+        '/mnt/petrelfs/linjingli/tmp/data/big_tmp/result_og_es.pt')
+
+    annotations = result_file['gt_list']
+    preds = result_file['pred_list']
+
+    test_eval = GroundingMetricMod()
+    test_eval.ground_eval(annotations, preds)
diff --git a/embodiedscan/eval/metrics/occupancy_metric.py b/models/EmbodiedScan/embodiedscan/eval/metrics/occupancy_metric.py
similarity index 99%
rename from embodiedscan/eval/metrics/occupancy_metric.py
rename to models/EmbodiedScan/embodiedscan/eval/metrics/occupancy_metric.py
index e94e2bb..d05c743 100644
--- a/embodiedscan/eval/metrics/occupancy_metric.py
+++ b/models/EmbodiedScan/embodiedscan/eval/metrics/occupancy_metric.py
@@ -4,6 +4,7 @@

 import numpy as np
 import torch
+from embodiedscan.registry import METRICS
 from mmengine.dist import (broadcast_object_list, collect_results,
                            is_main_process)
 from mmengine.evaluator import BaseMetric
@@ -11,8 +12,6 @@
 from mmengine.logging import MMLogger, print_log
 from terminaltables import AsciiTable

-from embodiedscan.registry import METRICS
-

 @METRICS.register_module()
 class OccupancyMetric(BaseMetric):
diff --git a/embodiedscan/explorer.py b/models/EmbodiedScan/embodiedscan/explorer.py
similarity index 86%
rename from embodiedscan/explorer.py rename to models/EmbodiedScan/embodiedscan/explorer.py index eecd58c..9e1db3e 100644 --- a/embodiedscan/explorer.py +++ b/models/EmbodiedScan/embodiedscan/explorer.py @@ -4,15 +4,13 @@ import numpy as np import open3d as o3d - from embodiedscan.visualization.color_selector import ColorMap from embodiedscan.visualization.continuous_drawer import ( - ContinuousDrawer, ContinuousOccupancyDrawer, - ContinuousPredictionOccupancyDrawer) + ContinuousDrawer, ContinuousOccupancyDrawer) from embodiedscan.visualization.img_drawer import ImageDrawer from embodiedscan.visualization.utils import _9dof_to_box, _box_add_thickness -DATASETS = ['scannet', '3rscan', 'matterport3d', 'arkitscenes'] +DATASETS = ['scannet', '3rscan', 'matterport3d'] class EmbodiedScanExplorer: @@ -65,7 +63,7 @@ def __init__(self, if self.verbose: print('Dataset root') for dataset in DATASETS: - print(dataset, ':', self.data_root.get(dataset, None)) + print(dataset, ':', self.data_root[dataset]) if self.verbose: print('Loading') @@ -118,10 +116,6 @@ def __init__(self, building, region = splits[1], splits[2] dirpath = os.path.join(self.data_root['matterport3d'], building) - elif dataset == 'arkitscenes': - split, region = splits[1], splits[2] - dirpath = os.path.join(self.data_root['arkitscenes'], - split, region) else: region = splits[1] dirpath = os.path.join(self.data_root[dataset], region) @@ -172,8 +166,6 @@ def list_cameras(self, scene): elif dataset == 'matterport3d': cam_name = img_path.split( '/')[-1][:-8] + img_path.split('/')[-1][-7:-4] - elif dataset == 'arkitscenes': - cam_name = img_path.split('/')[-1][:-4] else: cam_name = img_path.split('/')[-1][:-4] res.append(cam_name) @@ -258,9 +250,6 @@ def render_scene(self, scene_name, render_box=False): elif dataset == 'matterport3d': filepath = os.path.join(self.data_root['matterport3d'], building, 'region_segmentations', f'{region}.ply') - elif dataset == 'arkitscenes': - filepath = os.path.join(self.data_root['arkitscenes'], building, - region, f'{region}_3dod_mesh.ply') else: raise NotImplementedError @@ -320,8 +309,6 @@ def render_continuous_scene(self, elif dataset == 'matterport3d': cam_name = img_path.split( '/')[-1][:-8] + img_path.split('/')[-1][-7:-4] - elif dataset == 'arkitscenes': - cam_name = img_path.split('/')[-1][:-4] else: cam_name = img_path.split('/')[-1][:-4] if cam_name == start_cam: @@ -394,59 +381,6 @@ def render_continuous_occupancy(self, scene_name, start_cam=None): self.color_selector, start_idx) drawer.begin() - def render_continuous_occupancy_prediction(self, - scene_name, - start_cam=None): - """Render occupancy prediction with continuous ego-centric - observations. - - Args: - scene_name (str): Scene name. - start_cam (str, optional): Camera frame from which the rendering - starts. Defaults to None, corresponding to the first frame. 
- """ - s = scene_name.split('/') - if len(s) == 2: - dataset, region = s - else: - dataset, building, region = s - - selected_scene = None - start_idx = -1 - for scene in self.data: - if scene['sample_idx'] == scene_name: - selected_scene = scene - if start_cam is not None: - start_idx = -1 - for i, img in enumerate(scene['images']): - img_path = img['img_path'] - if dataset == 'scannet': - cam_name = img_path.split('/')[-1][:-4] - elif dataset == '3rscan': - cam_name = img_path.split('/')[-1][:-10] - elif dataset == 'matterport3d': - cam_name = img_path.split( - '/')[-1][:-8] + img_path.split('/')[-1][-7:-4] - else: - cam_name = img_path.split('/')[-1][:-4] - if cam_name == start_cam: - start_idx = i - break - if start_idx == -1: - print('No such camera') - return - else: - start_idx = 0 - - if selected_scene is None: - print('No such scene') - return - - drawer = ContinuousPredictionOccupancyDrawer( - dataset, self.data_root[dataset], selected_scene, self.classes, - self.id_to_index, self.color_selector, start_idx) - drawer.begin() - def render_occupancy(self, scene_name): """Render the occupancy annotation of a given scene. diff --git a/embodiedscan/models/__init__.py b/models/EmbodiedScan/embodiedscan/models/__init__.py similarity index 100% rename from embodiedscan/models/__init__.py rename to models/EmbodiedScan/embodiedscan/models/__init__.py diff --git a/embodiedscan/models/backbones/__init__.py b/models/EmbodiedScan/embodiedscan/models/backbones/__init__.py similarity index 100% rename from embodiedscan/models/backbones/__init__.py rename to models/EmbodiedScan/embodiedscan/models/backbones/__init__.py diff --git a/embodiedscan/models/backbones/mink_resnet.py b/models/EmbodiedScan/embodiedscan/models/backbones/mink_resnet.py similarity index 99% rename from embodiedscan/models/backbones/mink_resnet.py rename to models/EmbodiedScan/embodiedscan/models/backbones/mink_resnet.py index 8f17728..43ff0d4 100644 --- a/embodiedscan/models/backbones/mink_resnet.py +++ b/models/EmbodiedScan/embodiedscan/models/backbones/mink_resnet.py @@ -12,9 +12,8 @@ ME = BasicBlock = Bottleneck = SparseTensor = None import torch.nn as nn -from mmengine.model import BaseModule - from embodiedscan.registry import MODELS +from mmengine.model import BaseModule @MODELS.register_module() diff --git a/embodiedscan/models/data_preprocessors/__init__.py b/models/EmbodiedScan/embodiedscan/models/data_preprocessors/__init__.py similarity index 100% rename from embodiedscan/models/data_preprocessors/__init__.py rename to models/EmbodiedScan/embodiedscan/models/data_preprocessors/__init__.py diff --git a/embodiedscan/models/data_preprocessors/data_preprocessor.py b/models/EmbodiedScan/embodiedscan/models/data_preprocessors/data_preprocessor.py similarity index 99% rename from embodiedscan/models/data_preprocessors/data_preprocessor.py rename to models/EmbodiedScan/embodiedscan/models/data_preprocessors/data_preprocessor.py index 5fef338..539564d 100644 --- a/embodiedscan/models/data_preprocessors/data_preprocessor.py +++ b/models/EmbodiedScan/embodiedscan/models/data_preprocessors/data_preprocessor.py @@ -5,6 +5,8 @@ import numpy as np import torch +from embodiedscan.registry import MODELS +from embodiedscan.utils.typing_config import ConfigType, SampleList from mmdet.models import DetDataPreprocessor from mmdet.models.utils.misc import samplelist_boxtype2tensor from mmengine.model import stack_batch @@ -13,9 +15,6 @@ from torch import Tensor from torch.nn import functional as F -from embodiedscan.registry import 
MODELS -from embodiedscan.utils.typing_config import ConfigType, SampleList - from .utils import multiview_img_stack_batch from .voxelize import VoxelizationByGridShape, dynamic_scatter_3d diff --git a/embodiedscan/models/data_preprocessors/utils.py b/models/EmbodiedScan/embodiedscan/models/data_preprocessors/utils.py similarity index 100% rename from embodiedscan/models/data_preprocessors/utils.py rename to models/EmbodiedScan/embodiedscan/models/data_preprocessors/utils.py diff --git a/embodiedscan/models/data_preprocessors/voxelize.py b/models/EmbodiedScan/embodiedscan/models/data_preprocessors/voxelize.py similarity index 100% rename from embodiedscan/models/data_preprocessors/voxelize.py rename to models/EmbodiedScan/embodiedscan/models/data_preprocessors/voxelize.py diff --git a/models/EmbodiedScan/embodiedscan/models/dense_heads/__init__.py b/models/EmbodiedScan/embodiedscan/models/dense_heads/__init__.py new file mode 100644 index 0000000..ae20f42 --- /dev/null +++ b/models/EmbodiedScan/embodiedscan/models/dense_heads/__init__.py @@ -0,0 +1,4 @@ +from .fcaf3d_head import FCAF3DHead, FCAF3DHeadRotMat +from .grounding_head import GroundingHead + +__all__ = ['FCAF3DHead', 'FCAF3DHeadRotMat', 'GroundingHead'] diff --git a/embodiedscan/models/dense_heads/fcaf3d_head.py b/models/EmbodiedScan/embodiedscan/models/dense_heads/fcaf3d_head.py similarity index 99% rename from embodiedscan/models/dense_heads/fcaf3d_head.py rename to models/EmbodiedScan/embodiedscan/models/dense_heads/fcaf3d_head.py index 296f57b..f729f31 100644 --- a/embodiedscan/models/dense_heads/fcaf3d_head.py +++ b/models/EmbodiedScan/embodiedscan/models/dense_heads/fcaf3d_head.py @@ -11,19 +11,18 @@ pass import torch -from mmcv.cnn import Scale -from mmcv.ops import nms3d, nms3d_normal -from mmengine.model import BaseModel, bias_init_with_prob -from mmengine.structures import InstanceData -from pytorch3d.transforms import euler_angles_to_matrix, matrix_to_euler_angles -from torch import Tensor, nn - from embodiedscan.models.losses import BBoxCDLoss, RotatedIoU3DLoss from embodiedscan.registry import MODELS from embodiedscan.structures import (BaseInstance3DBoxes, rotation_3d_in_axis, rotation_3d_in_euler) from embodiedscan.utils.dist_utils import reduce_mean from embodiedscan.utils.typing_config import InstanceList, SampleList +from mmcv.cnn import Scale +from mmcv.ops import nms3d, nms3d_normal +from mmengine.model import BaseModel, bias_init_with_prob +from mmengine.structures import InstanceData +from pytorch3d.transforms import euler_angles_to_matrix, matrix_to_euler_angles +from torch import Tensor, nn @MODELS.register_module() diff --git a/embodiedscan/models/dense_heads/grounding_head.py b/models/EmbodiedScan/embodiedscan/models/dense_heads/grounding_head.py similarity index 99% rename from embodiedscan/models/dense_heads/grounding_head.py rename to models/EmbodiedScan/embodiedscan/models/dense_heads/grounding_head.py index 44b4a9b..061689b 100644 --- a/embodiedscan/models/dense_heads/grounding_head.py +++ b/models/EmbodiedScan/embodiedscan/models/dense_heads/grounding_head.py @@ -5,6 +5,10 @@ import torch import torch.nn as nn +from embodiedscan.registry import MODELS, TASK_UTILS +from embodiedscan.structures import (EulerDepthInstance3DBoxes, + rotation_3d_in_axis, rotation_3d_in_euler) +from embodiedscan.utils.typing_config import SampleList from mmcv.cnn import Linear from mmdet.models.utils import multi_apply from mmdet.utils import ConfigType, InstanceList, OptMultiConfig, reduce_mean @@ -13,11 +17,6 @@ from 
pytorch3d.transforms import matrix_to_euler_angles
 from torch import Tensor

-from embodiedscan.registry import MODELS, TASK_UTILS
-from embodiedscan.structures import (EulerDepthInstance3DBoxes,
-                                     rotation_3d_in_axis, rotation_3d_in_euler)
-from embodiedscan.utils.typing_config import SampleList
-

 class ContrastiveEmbed(nn.Module):
     """text visual ContrastiveEmbed layer.
@@ -329,6 +328,7 @@ def _bbox_pred_to_bbox(self, points, bbox_pred: Tensor) -> Tensor:
                 bbox_pred[..., 2] + bbox_pred[..., 3],
                 bbox_pred[..., 4] + bbox_pred[..., 5],
             ], -1)
+            base_bbox[..., 3:6] = base_bbox[..., 3:6].clamp(min=2e-2)
             return base_bbox
         # for rotated boxes (7-DoF or 9-DoF)
         # dx_min, dx_max, dy_min, dy_max, dz_min, dz_max, alpha ->
@@ -357,6 +357,7 @@ def _bbox_pred_to_bbox(self, points, bbox_pred: Tensor) -> Tensor:
                 (bbox_pred[..., 0] + bbox_pred[..., 1],
                  bbox_pred[..., 2] + bbox_pred[..., 3],
                  bbox_pred[..., 4] + bbox_pred[..., 5]),
                 dim=-1)
+            size = size.clamp(min=2e-2)
             return torch.cat((center, size, euler),
                              dim=-1).view(batch_size, num_queries, -1)
         else:
diff --git a/embodiedscan/models/dense_heads/imvoxel_occ_head.py b/models/EmbodiedScan/embodiedscan/models/dense_heads/imvoxel_occ_head.py
similarity index 99%
rename from embodiedscan/models/dense_heads/imvoxel_occ_head.py
rename to models/EmbodiedScan/embodiedscan/models/dense_heads/imvoxel_occ_head.py
index 7fd661c..1c133f4 100644
--- a/embodiedscan/models/dense_heads/imvoxel_occ_head.py
+++ b/models/EmbodiedScan/embodiedscan/models/dense_heads/imvoxel_occ_head.py
@@ -5,15 +5,14 @@
 import torch
 import torch.nn as nn
 import torch.nn.functional as F
-from mmcv.cnn import build_conv_layer
-from mmengine.model import BaseModule
-from torch import Tensor
-
 from embodiedscan.models.losses.occ_loss import (geo_scal_loss,
                                                  occ_multiscale_supervision,
                                                  sem_scal_loss)
 from embodiedscan.registry import MODELS
 from embodiedscan.utils.typing_config import SampleList
+from mmcv.cnn import build_conv_layer
+from mmengine.model import BaseModule
+from torch import Tensor


 @MODELS.register_module()
diff --git a/embodiedscan/models/detectors/__init__.py b/models/EmbodiedScan/embodiedscan/models/detectors/__init__.py
similarity index 59%
rename from embodiedscan/models/detectors/__init__.py
rename to models/EmbodiedScan/embodiedscan/models/detectors/__init__.py
index 0e230a6..ae77732 100644
--- a/embodiedscan/models/detectors/__init__.py
+++ b/models/EmbodiedScan/embodiedscan/models/detectors/__init__.py
@@ -2,10 +2,11 @@
 from .embodied_det3d import Embodied3DDetector
 from .embodied_occ import EmbodiedOccPredictor
 from .sparse_featfusion_grounder import SparseFeatureFusion3DGrounder
+from .sparse_featfusion_grounder_mod import SparseFeatureFusion3DGrounderMod
 from .sparse_featfusion_single_stage import \
     SparseFeatureFusionSingleStage3DDetector

 __all__ = [
-    'Embodied3DDetector', 'EmbodiedOccPredictor', 'DenseFusionOccPredictor',
-    'SparseFeatureFusion3DGrounder', 'SparseFeatureFusionSingleStage3DDetector'
+    'Embodied3DDetector', 'SparseFeatureFusionSingleStage3DDetector',
+    'SparseFeatureFusion3DGrounder', 'SparseFeatureFusion3DGrounderMod'
 ]
diff --git a/embodiedscan/models/detectors/dense_fusion_occ.py b/models/EmbodiedScan/embodiedscan/models/detectors/dense_fusion_occ.py
similarity index 99%
rename from embodiedscan/models/detectors/dense_fusion_occ.py
rename to models/EmbodiedScan/embodiedscan/models/detectors/dense_fusion_occ.py
index b56b8fb..5c3ca9d 100644
--- a/embodiedscan/models/detectors/dense_fusion_occ.py
+++
b/models/EmbodiedScan/embodiedscan/models/detectors/dense_fusion_occ.py @@ -11,13 +11,12 @@ ME = None pass -from mmengine.model import BaseModel - from embodiedscan.registry import MODELS, TASK_UTILS from embodiedscan.structures.bbox_3d import get_proj_mat_by_coord_type from embodiedscan.utils import ConfigType, OptConfigType from embodiedscan.utils.typing_config import (ForwardResults, InstanceList, SampleList) +from mmengine.model import BaseModel from ..layers.fusion_layers.point_fusion import (batch_point_sample, point_sample) diff --git a/embodiedscan/models/detectors/embodied_det3d.py b/models/EmbodiedScan/embodiedscan/models/detectors/embodied_det3d.py similarity index 99% rename from embodiedscan/models/detectors/embodied_det3d.py rename to models/EmbodiedScan/embodiedscan/models/detectors/embodied_det3d.py index a6dfd40..6407e47 100644 --- a/embodiedscan/models/detectors/embodied_det3d.py +++ b/models/EmbodiedScan/embodiedscan/models/detectors/embodied_det3d.py @@ -12,14 +12,13 @@ ME = None pass -from mmengine.model import BaseModel -from mmengine.structures import InstanceData - from embodiedscan.registry import MODELS from embodiedscan.structures.bbox_3d import get_proj_mat_by_coord_type from embodiedscan.utils import ConfigType from embodiedscan.utils.typing_config import (ForwardResults, InstanceList, SampleList) +from mmengine.model import BaseModel +from mmengine.structures import InstanceData from ..layers.fusion_layers.point_fusion import batch_point_sample diff --git a/embodiedscan/models/detectors/embodied_occ.py b/models/EmbodiedScan/embodiedscan/models/detectors/embodied_occ.py similarity index 99% rename from embodiedscan/models/detectors/embodied_occ.py rename to models/EmbodiedScan/embodiedscan/models/detectors/embodied_occ.py index 85a7617..75ff6cb 100644 --- a/embodiedscan/models/detectors/embodied_occ.py +++ b/models/EmbodiedScan/embodiedscan/models/detectors/embodied_occ.py @@ -11,13 +11,12 @@ ME = None pass -from mmengine.model import BaseModel - from embodiedscan.registry import MODELS, TASK_UTILS from embodiedscan.structures.bbox_3d import get_proj_mat_by_coord_type from embodiedscan.utils import ConfigType, OptConfigType from embodiedscan.utils.typing_config import (ForwardResults, InstanceList, SampleList) +from mmengine.model import BaseModel from ..layers.fusion_layers.point_fusion import batch_point_sample diff --git a/embodiedscan/models/detectors/sparse_featfusion_grounder.py b/models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_grounder.py similarity index 96% rename from embodiedscan/models/detectors/sparse_featfusion_grounder.py rename to models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_grounder.py index 1241ec4..0f6850a 100644 --- a/embodiedscan/models/detectors/sparse_featfusion_grounder.py +++ b/models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_grounder.py @@ -1,5 +1,5 @@ # Copyright (c) OpenRobotLab. All rights reserved. 
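Both the `tokens_positive_rebuild` branch in the dataset code earlier in this patch and `create_positive_map` in the grounder below work with character spans into the prompt text. A small, self-contained illustration (toy strings) of how those spans are rebuilt and why the `end == 0` guard added below can trigger:

```python
text = 'the red chair next to the table'
target = 'red chair'

# Rebuild one [begin, end) character span per target word, as the
# tokens_positive_rebuild branch above does.
tokens_positive = [[text.find(part), text.find(part) + len(part)]
                   for part in target.split()]
print(tokens_positive)  # [[4, 7], [8, 13]]

# str.find returns -1 for a missing word, so a one-character word that
# is absent yields the degenerate span [-1, 0]; the `end == 0` guard in
# create_positive_map (see the hunk below) skips exactly such spans.
missing = 'z'
print([text.find(missing), text.find(missing) + len(missing)])  # [-1, 0]
```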
-# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/detectors/single_stage_sparse.py # noqa +# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/embodiedscan/models/detectors/single_stage_sparse.py # noqa from typing import Dict, List, Optional, Tuple, Union import torch @@ -13,10 +13,6 @@ ME = None pass -from mmengine.model import BaseModel -from mmengine.structures import InstanceData -from transformers import RobertaModel, RobertaTokenizerFast - from embodiedscan.models.layers import SparseFeatureFusionTransformerDecoder from embodiedscan.models.layers.fusion_layers.point_fusion import ( batch_point_sample, point_sample) @@ -25,6 +21,9 @@ from embodiedscan.utils import ConfigType, OptConfigType from embodiedscan.utils.typing_config import (ForwardResults, InstanceList, OptSampleList, SampleList) +from mmengine.model import BaseModel +from mmengine.structures import InstanceData +from transformers import RobertaModel, RobertaTokenizerFast @MODELS.register_module() @@ -173,6 +172,20 @@ def convert_sparse_feature(self, x: List[Tensor], batch_size: int): return batch_features_list, batch_coords_list + def _replace_tensors_with_shape_and_dtype(self, obj): + if isinstance(obj, list): + return f'{len(obj)} copies of ' + self._replace_tensors_with_shape_and_dtype( + obj[0]) + elif isinstance(obj, dict): + return { + key: self._replace_tensors_with_shape_and_dtype(value) + for key, value in obj.items() + } + elif isinstance(obj, torch.Tensor): + return str((obj.shape, obj.dtype)) + else: + return str(type(obj)) + def extract_feat( self, batch_inputs_dict: Dict[str, Tensor], batch_data_samples: SampleList @@ -191,6 +204,11 @@ def extract_feat( and for inside 3D object detection, usually a dict containing features will be obtained. """ + # from copy import deepcopy + # show = deepcopy(batch_inputs_dict) + # show = self._replace_tensors_with_shape_and_dtype(show) + # print(show) + # exit() points = batch_inputs_dict['points'] # construct sparse tensor and features if self.use_xyz_feat: @@ -201,7 +219,10 @@ def extract_feat( coordinates, features = ME.utils.batch_sparse_collate( [(p[:, :3] / self.voxel_size, p[:, 3:]) for p in points], device=points[0].device) - + # print(f"coordinates: {coordinates.dtype} {coordinates.shape}") + # print(f"features: {features.dtype}, {features.shape}") + # features: torch.float32, torch.Size([100000, 3]) + # coordinates: torch.int32 torch.Size([100000, 4]) x = ME.SparseTensor(coordinates=coordinates, features=features) x = self.backbone_3d(x) @@ -464,6 +485,7 @@ def loss(self, batch_inputs_dict: dict, batch_data_samples: SampleList, Returns: dict: A dictionary of loss components. """ + text_prompts = [ data_samples.text for data_samples in batch_data_samples ] # txt list @@ -589,6 +611,8 @@ def create_positive_map(self, tokenized, tokens_positive, for j, tok_list in enumerate(tokens_positive): for (beg, end) in tok_list: + if end == 0: + continue try: beg_pos = tokenized.char_to_token(batch_idx, beg) end_pos = tokenized.char_to_token(batch_idx, end - 1) diff --git a/models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_grounder_mod.py b/models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_grounder_mod.py new file mode 100644 index 0000000..58c4a01 --- /dev/null +++ b/models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_grounder_mod.py @@ -0,0 +1,702 @@ +# Copyright (c) OpenRobotLab. All rights reserved. 
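The `_replace_tensors_with_shape_and_dtype` helper added above (and repeated in the new module below) summarises a nested batch without printing tensor contents. A standalone sketch of the same idea; like the method above, the list branch assumes a non-empty list of identically shaped items:

```python
import torch

def summarize(obj):
    """Replace tensors in a nested structure with their shape/dtype."""
    if isinstance(obj, list):
        # Assumes a non-empty list of same-shaped items, as above.
        return f'{len(obj)} copies of ' + summarize(obj[0])
    elif isinstance(obj, dict):
        return {k: summarize(v) for k, v in obj.items()}
    elif isinstance(obj, torch.Tensor):
        return str((obj.shape, obj.dtype))
    return str(type(obj))

batch_inputs_dict = {'points': [torch.rand(100000, 6) for _ in range(2)]}
print(summarize(batch_inputs_dict))
# {'points': '2 copies of (torch.Size([100000, 6]), torch.float32)'}
```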
+# Adapted from https://github.com/SamsungLabs/fcaf3d/blob/master/mmdet3d/models/detectors/single_stage_sparse.py # noqa
+from typing import Dict, List, Optional, Tuple, Union
+
+import torch
+import torch.nn as nn
+from torch import Tensor
+
+try:
+    import MinkowskiEngine as ME
+except ImportError:
+    # Please follow getting_started.md to install MinkowskiEngine.
+    ME = None
+    pass
+
+from embodiedscan.models.layers import SparseFeatureFusionTransformerDecoder
+from embodiedscan.models.layers.fusion_layers.point_fusion import (
+    batch_point_sample, point_sample)
+from embodiedscan.registry import MODELS
+from embodiedscan.structures.bbox_3d import get_proj_mat_by_coord_type
+from embodiedscan.utils import ConfigType, OptConfigType
+from embodiedscan.utils.typing_config import (ForwardResults, InstanceList,
+                                              OptSampleList, SampleList)
+from mmengine.model import BaseModel
+from mmengine.structures import InstanceData
+from transformers import RobertaModel, RobertaTokenizerFast
+
+
+@MODELS.register_module()
+class SparseFeatureFusion3DGrounderMod(BaseModel):
+    """Sparse feature fusion 3D grounder, modified for MMScan evaluation.
+
+    Args:
+        backbone (dict): Config dict of detector's backbone.
+        neck (dict, optional): Config dict of neck. Defaults to None.
+        bbox_head (dict, optional): Config dict of box head. Defaults to None.
+        train_cfg (dict, optional): Config dict of training hyper-parameters.
+            Defaults to None.
+        test_cfg (dict, optional): Config dict of test hyper-parameters.
+            Defaults to None.
+        data_preprocessor (dict or ConfigDict, optional): The pre-process
+            config of :class:`BaseDataPreprocessor`. It usually includes
+            ``pad_size_divisor``, ``pad_value``, ``mean`` and ``std``.
+        init_cfg (dict or ConfigDict, optional): the config to control the
+            initialization. Defaults to None.
+    """
+    _version = 2
+
+    def __init__(
+        self,
+        # backbone: ConfigType,
+        backbone_3d: ConfigType,
+        bbox_head: ConfigType,
+        neck: ConfigType = None,
+        neck_3d: ConfigType = None,
+        neck_lidar: ConfigType = None,
+        decoder: ConfigType = None,
+        voxel_size: float = 0.01,
+        num_queries: int = 512,
+        coord_type: str = 'CAMERA',
+        train_cfg: OptConfigType = None,
+        test_cfg: OptConfigType = None,
+        data_preprocessor: OptConfigType = None,
+        use_xyz_feat: bool = False,
+        init_cfg: OptConfigType = None):
+        super().__init__(data_preprocessor=data_preprocessor,
+                         init_cfg=init_cfg)
+        # self.backbone = MODELS.build(backbone)
+        self.backbone_3d = MODELS.build(backbone_3d)
+        if neck is not None:
+            self.neck = MODELS.build(neck)
+        if neck_3d is not None:
+            self.neck_3d = MODELS.build(neck_3d)
+        if neck_lidar is not None:
+            self.neck_lidar = MODELS.build(neck_lidar)
+        bbox_head.update(train_cfg=train_cfg)
+        bbox_head.update(test_cfg=test_cfg)
+        self.bbox_head = MODELS.build(bbox_head)
+        self.decoder = decoder
+        self.coord_type = coord_type
+        self.train_cfg = train_cfg
+        self.test_cfg = test_cfg
+        self.num_queries = num_queries
+        if hasattr(self.bbox_head.contrastive_cfg, 'max_text_len'):
+            self.max_num_entities = self.bbox_head.contrastive_cfg.max_text_len
+        else:
+            self.max_num_entities = 256
+        if ME is None:
+            raise ImportError(
+                'Please follow `getting_started.md` to install MinkowskiEngine.'  # noqa: E501
+            )
+        self.voxel_size = voxel_size
+        self.use_xyz_feat = use_xyz_feat
+        self._init_layers()
+
+    def _init_layers(self) -> None:
+        """Initialize layers except for backbone, neck and bbox_head."""
+        # text modules
+        t_type = 'roberta-base'
+        self.tokenizer = RobertaTokenizerFast.from_pretrained(t_type)
+        self.text_encoder = RobertaModel.from_pretrained(t_type)
+
+        self.decoder = SparseFeatureFusionTransformerDecoder(**self.decoder)
+        # map the text feature to the target dimension number
+        self.embed_dims = self.decoder.embed_dims
+        self.text_feat_map = nn.Linear(self.text_encoder.config.hidden_size,
+                                       self.embed_dims,
+                                       bias=True)
+
+    @property
+    def with_neck(self):
+        """Whether the detector has a 2D neck."""
+        return hasattr(self, 'neck') and self.neck is not None
+
+    @property
+    def with_neck_3d(self):
+        """Whether the detector has a 3D neck."""
+        return hasattr(self, 'neck_3d') and self.neck_3d is not None
+
+    @property
+    def with_neck_lidar(self):
+        """Whether the detector has a lidar neck."""
+        return hasattr(self, 'neck_lidar') and self.neck_lidar is not None
+
+    def convert_sparse_feature(self, x: List[Tensor], batch_size: int):
+        """Convert multi-level SparseTensor features back to dense
+        per-sample tensors.
+
+        Args:
+            x (List[Tensor]): Multi-level sparse features produced by the
+                3D backbone.
+            batch_size (int): Number of samples in the batch.
+
+        Returns:
+            tuple[List[Tensor], List[Tensor]]: Per-sample features and their
+            metric coordinates, concatenated over all levels.
+ """ + + batch_features_list = [[] + for _ in range(batch_size)] # list of features + batch_coords_list = [[] + for _ in range(batch_size)] # list of coordinates + + # for each level of sparsetensor feature + for sparse_tensor in x: + # extract non-zero features + features = sparse_tensor.F + # Obtain the coordinates of batch decomposition + # remember x self.voxel_size + decomposed_coords = [ + coords * self.voxel_size + for coords in sparse_tensor.decomposed_coordinates + ] + + for batch_idx, coords in enumerate(decomposed_coords): + # Since decomposed_coordinates are already separated + # by batches, we can use them directly. + batch_features = features[sparse_tensor.C[:, 0] == batch_idx] + batch_features_list[batch_idx].append(batch_features) + batch_coords_list[batch_idx].append(coords) + + batch_features_list = [ + torch.cat(features, dim=0) for features in batch_features_list + ] + batch_coords_list = [ + torch.cat(coords, dim=0) for coords in batch_coords_list + ] + + return batch_features_list, batch_coords_list + + def _replace_tensors_with_shape_and_dtype(self, obj): + if isinstance(obj, list): + return f'{len(obj)} copies of ' + self._replace_tensors_with_shape_and_dtype( + obj[0]) + elif isinstance(obj, dict): + return { + key: self._replace_tensors_with_shape_and_dtype(value) + for key, value in obj.items() + } + elif isinstance(obj, torch.Tensor): + return str((obj.shape, obj.dtype)) + else: + return str(type(obj)) + + def extract_feat( + self, batch_inputs_dict: Dict[str, + Tensor], batch_data_samples: SampleList + ) -> Union[Tuple[torch.Tensor], Dict[str, Tensor]]: + """Directly extract features from the backbone+neck. + + Args: + batch_inputs_dict (dict): The model input dict which includes + 'points' keys. + + - points (list[torch.Tensor]): Point cloud of each sample. + + Returns: + tuple[Tensor] | dict: For outside 3D object detection, we + typically obtain a tuple of features from the backbone + neck, + and for inside 3D object detection, usually a dict containing + features will be obtained. 
+ """ + # from copy import deepcopy + # show = deepcopy(batch_inputs_dict) + # show = self._replace_tensors_with_shape_and_dtype(show) + # print(show) + + points = batch_inputs_dict['points'] + assert isinstance(points, list) + batch_size = len(points) + # construct sparse tensor and features + # whether use xyz into the feat + if self.use_xyz_feat: + coordinates, features = ME.utils.batch_sparse_collate( + [(p[:, :3] / self.voxel_size, p) for p in points], + device=points[0].device) + else: + coordinates, features = ME.utils.batch_sparse_collate( + [(p[:, :3] / self.voxel_size, p[:, 3:]) for p in points], + device=points[0].device) + # lry-debugging + # print(f"coordinates: {coordinates.dtype} {coordinates.shape}") + # print(f"features: {features.dtype}, {features.shape}") + + x = ME.SparseTensor(coordinates=coordinates, features=features) + + x = self.backbone_3d(x) + num_levels = len(x) + num_samples = len(x[0].decomposed_coordinates) + + if self.with_neck_lidar: + x = self.neck_lidar(x) + # channel mapper feature of different level to the fixed number + feats, scores, coords = self.neck_3d(x, batch_size) + + return feats, scores, coords + + def forward_transformer(self, + point_feats: List[Tensor], + scores: List[Tensor], + point_xyz: List[Tensor], + text_dict: Dict, + batch_data_samples: OptSampleList = None) -> Dict: + decoder_inputs_dict, head_inputs_dict = self.pre_decoder( + point_feats, scores, point_xyz, **text_dict) + decoder_outputs_dict = self.forward_decoder(**decoder_inputs_dict) + head_inputs_dict.update(decoder_outputs_dict) + return head_inputs_dict + + def pre_decoder( + self, + feats_list: List[Tensor], + scores_list: List[Tensor], + xyz_list: List[Tensor], + text_feats: Tensor, + text_token_mask: Tensor, + batch_data_samples: OptSampleList = None, + ) -> Tuple[Dict]: + + feats_with_pos_list = [ + torch.cat((feats, pos), dim=-1) + for feats, pos in zip(feats_list, xyz_list) + ] + # batch the list of tensor + max_feats_length = max(feats.size(0) for feats in feats_with_pos_list) + min_feats_length = min(feats.size(0) for feats in feats_with_pos_list) + padding_length = [ + max_feats_length - feats.size(0) for feats in feats_with_pos_list + ] + + padded_feats_list = [] + feats_mask_list = [] + for batch_id, feats in enumerate(feats_with_pos_list): + # If padding is needed, create a padding tensor + # of the corresponding size. 
+    def pre_decoder(
+        self,
+        feats_list: List[Tensor],
+        scores_list: List[Tensor],
+        xyz_list: List[Tensor],
+        text_feats: Tensor,
+        text_token_mask: Tensor,
+        batch_data_samples: OptSampleList = None,
+    ) -> Tuple[Dict]:
+
+        feats_with_pos_list = [
+            torch.cat((feats, pos), dim=-1)
+            for feats, pos in zip(feats_list, xyz_list)
+        ]
+        # batch the list of tensors
+        max_feats_length = max(feats.size(0) for feats in feats_with_pos_list)
+        min_feats_length = min(feats.size(0) for feats in feats_with_pos_list)
+        padding_length = [
+            max_feats_length - feats.size(0) for feats in feats_with_pos_list
+        ]
+
+        padded_feats_list = []
+        feats_mask_list = []
+        for batch_id, feats in enumerate(feats_with_pos_list):
+            # If padding is needed, create a padding tensor
+            # of the corresponding size.
+            if padding_length[batch_id] > 0:
+                padding_feats = torch.zeros(padding_length[batch_id],
+                                            feats.size(1)).to(feats.device)
+                padded_feats = torch.cat([feats, padding_feats], dim=0)
+            else:
+                padded_feats = feats
+            padded_feats_list.append(padded_feats)
+            feats_mask = torch.zeros(max_feats_length,
+                                     dtype=torch.bool).to(feats.device)
+            feats_mask[:feats.size(0)] = 1
+            feats_mask_list.append(feats_mask)
+
+        feats_with_pos = torch.stack(
+            padded_feats_list)  # (b, max_feats_length, C+3)
+        feats_mask = torch.stack(
+            feats_mask_list).bool()  # (b, max_feats_length)
+
+        feats, coords = feats_with_pos[..., :-3], feats_with_pos[..., -3:]
+
+        # (b, max_feats_length, max_text_length)
+        enc_outputs_class = self.bbox_head.cls_branches[
+            self.decoder.num_layers](feats, text_feats, text_token_mask,
+                                     feats_mask)
+
+        # keep at most num_queries proposals, bounded by the smallest
+        # number of visual tokens in the batch
+        topk = min(self.num_queries, min_feats_length)
+        topk_indices = torch.topk(enc_outputs_class.max(-1)[0], k=topk,
+                                  dim=1)[1]
+
+        bbox_preds = self.bbox_head.reg_branches[self.decoder.num_layers](
+            feats)
+        bbox_pred_bboxes = self.bbox_head._bbox_pred_to_bbox(
+            coords, bbox_preds)
+
+        topk_query_coords = torch.gather(
+            coords, 1,
+            topk_indices.unsqueeze(-1).repeat(1, 1, 3))
+        topk_pred_bboxes = torch.gather(
+            bbox_pred_bboxes, 1,
+            topk_indices.unsqueeze(-1).repeat(1, 1, 9))
+        topk_feats = torch.gather(
+            feats, 1,
+            topk_indices.unsqueeze(-1).repeat(1, 1, feats.size(-1)))
+
+        decoder_inputs_dict = dict(
+            query=topk_feats,
+            feats=feats,
+            feats_attention_mask=~feats_mask,
+            query_coords=topk_query_coords,
+            feats_coords=coords,
+            pred_bboxes=topk_pred_bboxes.detach().clone(),
+            text_feats=text_feats,
+            text_attention_mask=~text_token_mask)
+
+        head_inputs_dict = dict(text_feats=text_feats,
+                                text_token_mask=text_token_mask)
+        return decoder_inputs_dict, head_inputs_dict
+
+    def forward_decoder(self, query: Tensor, feats: Tensor,
+                        feats_attention_mask: Tensor, query_coords: Tensor,
+                        feats_coords: Tensor, pred_bboxes: Tensor,
+                        text_feats: Tensor,
+                        text_attention_mask: Tensor) -> Dict:
+        """Forward with Transformer decoder.
+
+        The forward procedure here is 'pre_decoder' -> 'decoder'; see
+        `forward_transformer` above. The interface follows
+        `TransformerDetector.forward_transformer` in
+        `mmdet/detector/base_detr.py`.
+
+        Args:
+            query (Tensor): The queries of decoder inputs, has shape
+                (bs, num_queries, dim), where `num_queries` is the number
+                of selected top-k proposals.
+
+        Returns:
+            dict: The dictionary of decoder outputs, which includes the
+            `hidden_states` of the decoder output and the bbox predictions
+            `all_layers_pred_bboxes` refined at every decoder layer.
+        """
+        inter_states, pred_bboxes = self.decoder(
+            query=query,
+            key=feats,
+            value=feats,
+            key_padding_mask=feats_attention_mask,
+            self_attn_mask=None,
+            cross_attn_mask=None,
+            query_coords=query_coords,
+            key_coords=feats_coords,
+            pred_bboxes=pred_bboxes,
+            text_feats=text_feats,
+            text_attention_mask=text_attention_mask,
+            bbox_head=self.bbox_head)
+
+        decoder_outputs_dict = dict(hidden_states=inter_states,
+                                    all_layers_pred_bboxes=pred_bboxes)
+        return decoder_outputs_dict
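`pre_decoder` above scores every visual token against the text and keeps the top-k as decoder queries via a `torch.topk` + `torch.gather` pattern. A minimal sketch of that pattern in isolation (all shapes are illustrative):

```python
import torch

bs, n_tokens, n_text, k = 2, 50, 8, 10
cls_logits = torch.randn(bs, n_tokens, n_text)  # token-text alignment scores
coords = torch.rand(bs, n_tokens, 3)

# Score each visual token by its best-matching text token, keep top-k.
topk_idx = torch.topk(cls_logits.max(-1)[0], k=k, dim=1)[1]  # (bs, k)
topk_coords = torch.gather(coords, 1,
                           topk_idx.unsqueeze(-1).repeat(1, 1, 3))
print(topk_coords.shape)  # torch.Size([2, 10, 3])
```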
+    def loss(self, batch_inputs_dict: dict, batch_data_samples: SampleList,
+             **kwargs) -> Union[dict, list]:
+        """Calculate losses from a batch of inputs dict and data samples.
+
+        Args:
+            batch_inputs_dict (dict): The model input dict which include
+                'points', 'img' keys.
+
+                - points (list[torch.Tensor]): Point cloud of each sample.
+                - imgs (torch.Tensor, optional): Image of each sample.
+
+            batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
+                Samples. It usually includes information such as
+                `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`.
+
+        Returns:
+            dict: A dictionary of loss components.
+        """
+
+        text_prompts = [
+            data_samples.text for data_samples in batch_data_samples
+        ]  # list of text prompts
+
+        tokens_positive = [
+            data_samples.tokens_positive for data_samples in batch_data_samples
+        ]
+
+        tokenized = self.tokenizer.batch_encode_plus(
+            text_prompts, padding='longest',
+            return_tensors='pt').to(batch_inputs_dict['points'][0].device)
+        positive_maps = self.get_positive_map(tokenized, tokens_positive)
+
+        encoded_text = self.text_encoder(**tokenized)
+        text_feats = self.text_feat_map(encoded_text.last_hidden_state)
+        text_token_mask = tokenized.attention_mask.bool()
+        text_dict = dict()
+        text_dict['text_feats'] = text_feats
+        text_dict['text_token_mask'] = text_token_mask  # (bs, max_text_length)
+        # Note: the attention mask from Hugging Face marks valid tokens
+        # with 1, which is the inverse of the PyTorch transformer
+        # convention, so it is inverted before use as an attention mask.
+        for i, data_samples in enumerate(batch_data_samples):
+            positive_map = positive_maps[i].to(
+                batch_inputs_dict['points']
+                [0].device).bool().float()  # (1, max_text_length)
+            if not isinstance(positive_maps, List):
+                positive_map = positive_map.unsqueeze(0)
+            text_token_mask = text_dict['text_token_mask'][
+                i]  # (max_text_length)
+            data_samples.gt_instances_3d.positive_maps = positive_map
+            # (1, max_text_length)
+            data_samples.gt_instances_3d.text_token_mask = \
+                text_token_mask.unsqueeze(0).repeat(
+                    len(positive_map), 1)
+
+        point_feats, scores, point_xyz = self.extract_feat(
+            batch_inputs_dict, batch_data_samples)
+        head_inputs_dict = self.forward_transformer(point_feats, scores,
+                                                    point_xyz, text_dict,
+                                                    batch_data_samples)
+        losses = self.bbox_head.loss(**head_inputs_dict,
+                                     batch_data_samples=batch_data_samples)
+        return losses
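The mask-inversion note in `loss` above is worth a concrete look: Hugging Face marks real tokens with 1, while PyTorch padding masks mark positions to ignore with True. A minimal sketch of the `~mask` flip:

```python
import torch

# Hugging Face convention: attention_mask == 1 marks real tokens.
hf_mask = torch.tensor([[1, 1, 1, 0, 0]], dtype=torch.bool)
# PyTorch convention (e.g. key_padding_mask): True marks PADDING.
key_padding_mask = ~hf_mask
print(key_padding_mask)
# tensor([[False, False, False,  True,  True]])
```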
+    def predict(self, batch_inputs_dict, batch_data_samples):
+        text_prompts = [
+            data_samples.text for data_samples in batch_data_samples
+        ]  # list of text prompts
+
+        point_feats, scores, point_xyz = self.extract_feat(
+            batch_inputs_dict, batch_data_samples)
+
+        # extract text feats
+        tokenized = self.tokenizer.batch_encode_plus(
+            text_prompts, padding='longest',
+            return_tensors='pt').to(batch_inputs_dict['points'][0].device)
+
+        if 'tokens_positive' in batch_data_samples[0]:
+            tokens_positive = [
+                data_samples.tokens_positive
+                for data_samples in batch_data_samples
+            ]
+        else:
+            # fall back to a pseudo tokens_positive during
+            # format-only inference
+            tokens_positive = [[[[0, 1]]]
+                               for _ in range(len(batch_data_samples))]
+        positive_maps = self.get_positive_map(tokenized, tokens_positive)
+
+        encoded_text = self.text_encoder(**tokenized)
+        text_feats = self.text_feat_map(encoded_text.last_hidden_state)
+        text_token_mask = tokenized.attention_mask.bool()
+        text_dict = dict()
+        text_dict['text_feats'] = text_feats
+        text_dict['text_token_mask'] = text_token_mask  # (bs, max_text_length)
+        # Note: the attention mask from Hugging Face marks valid tokens
+        # with 1, which is the inverse of the PyTorch transformer
+        # convention, so it is inverted before use as an attention mask.
+        for i, data_samples in enumerate(batch_data_samples):
+            positive_map = positive_maps[i].to(
+                batch_inputs_dict['points']
+                [0].device).bool().float()  # (1, max_text_length)
+            if not isinstance(positive_maps, List):
+                positive_map = positive_map.unsqueeze(0)
+            text_token_mask = text_dict['text_token_mask'][
+                i]  # (max_text_length)
+            data_samples.gt_instances_3d.positive_maps = positive_map
+            # (1, max_text_length)
+            data_samples.gt_instances_3d.text_token_mask = \
+                text_token_mask.unsqueeze(0).repeat(
+                    len(positive_map), 1)
+
+        head_inputs_dict = self.forward_transformer(point_feats, scores,
+                                                    point_xyz, text_dict,
+                                                    batch_data_samples)
+        results_list = self.bbox_head.predict(
+            **head_inputs_dict, batch_data_samples=batch_data_samples)
+
+        for data_sample, pred_instances_3d in zip(batch_data_samples,
+                                                  results_list):
+            data_sample.pred_instances_3d = pred_instances_3d
+        return batch_data_samples
+
+    def create_positive_map(self, tokenized, tokens_positive,
+                            batch_idx) -> Tensor:
+        """Construct a map such that positive_map[i, j] = True if box i is
+        associated with token j.
+
+        Args:
+            tokenized: The tokenized input.
+            tokens_positive (list): A list of character ranges
+                associated with positive boxes.
+            batch_idx (int): Index of the sample in the batch.
+
+        Returns:
+            torch.Tensor: The positive map, normalized so that every row
+            sums to (approximately) one.
+
+        Raises:
+            Exception: If an error occurs during char-to-token mapping.
+        """
+        # positive_map has shape (num_boxes, max_num_entities); the token
+        # positions covered by each box's phrase are set to 1.
+        positive_map = torch.zeros(
+            (len(tokens_positive), self.max_num_entities), dtype=torch.float)
+
+        for j, tok_list in enumerate(tokens_positive):
+            for (beg, end) in tok_list:
+                if end <= 0:
+                    continue
+                try:
+                    beg_pos = tokenized.char_to_token(batch_idx, beg)
+                    end_pos = tokenized.char_to_token(batch_idx, end - 1)
+                except Exception as e:
+                    print('beg:', beg, 'end:', end)
+                    print('token_positive:', tokens_positive)
+                    raise e
+                if beg_pos is None:
+                    try:
+                        beg_pos = tokenized.char_to_token(batch_idx, beg + 1)
+                        if beg_pos is None:
+                            beg_pos = tokenized.char_to_token(
+                                batch_idx, beg + 2)
+                    except Exception:
+                        beg_pos = None
+                if end_pos is None:
+                    try:
+                        end_pos = tokenized.char_to_token(batch_idx, end - 2)
+                        if end_pos is None:
+                            end_pos = tokenized.char_to_token(
+                                batch_idx, end - 3)
+                    except Exception:
+                        end_pos = None
+                if beg_pos is None or end_pos is None:
+                    continue
+
+                assert beg_pos is not None and end_pos is not None
+                positive_map[j, beg_pos:end_pos + 1].fill_(1)
+
+        return positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
+
+    def get_positive_map(self, tokenized, tokens_positive):
+        """Compute a positive map for each sample in the batch.
+
+        Each data sample can contain a single box or multiple grounding
+        boxes; this unifies the single and multi grounding box cases.
+        """
+        positive_maps = []
+        for idx, tp in enumerate(tokens_positive):
+            positive_map = self.create_positive_map(tokenized, tp, idx)
+            positive_maps.append(positive_map)
+        return positive_maps
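A minimal sketch of the row normalization that `create_positive_map` above applies: each phrase's row distributes a unit of mass uniformly over its tokens (the spans below are made up):

```python
import torch

max_text_len = 8
positive_map = torch.zeros(2, max_text_len)
positive_map[0, 1:3] = 1  # phrase 0 covers tokens 1-2
positive_map[1, 4:5] = 1  # phrase 1 covers token 4
# Same normalization as above: each row sums to ~1.
positive_map = positive_map / (positive_map.sum(-1)[:, None] + 1e-6)
print(positive_map[0])
# tensor([0.0000, 0.5000, 0.5000, 0.0000, 0.0000, 0.0000, 0.0000, 0.0000])
```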
+    def forward(self,
+                inputs: Union[dict, List[dict]],
+                data_samples: Optional[List] = None,
+                mode: str = 'tensor',
+                **kwargs) -> ForwardResults:
+        """The unified entry for a forward process in both training and test.
+
+        The method should accept three modes: "tensor", "predict" and "loss":
+
+        - "tensor": Forward the whole network and return tensor or tuple of
+          tensor without any post-processing, same as a common nn.Module.
+        - "predict": Forward and return the predictions, which are fully
+          processed to a list of :obj:`Det3DDataSample`.
+        - "loss": Forward and return a dict of losses according to the given
+          inputs and data samples.
+
+        Note that this method doesn't handle either back propagation or
+        optimizer updating, which are done in the :meth:`train_step`.
+
+        Args:
+            inputs (dict | list[dict]): When it is a list[dict], the
+                outer list indicates the test time augmentation. Each
+                dict contains batch inputs
+                which include 'points' and 'imgs' keys.
+
+                - points (list[torch.Tensor]): Point cloud of each sample.
+                - imgs (torch.Tensor): Image tensor has shape (B, C, H, W).
+            data_samples (list[:obj:`Det3DDataSample`],
+                list[list[:obj:`Det3DDataSample`]], optional): The
+                annotation data of every sample. When it is a list[list], the
+                outer list indicates the test time augmentation, and the
+                inner list indicates the batch. Otherwise, the list simply
+                indicates the batch. Defaults to None.
+            mode (str): Return what kind of value. Defaults to 'tensor'.
+
+        Returns:
+            The return type depends on ``mode``.
+
+            - If ``mode="tensor"``, return a tensor or a tuple of tensor.
+            - If ``mode="predict"``, return a list of :obj:`Det3DDataSample`.
+            - If ``mode="loss"``, return a dict of tensor.
+        """
+        if mode == 'loss':
+            return self.loss(inputs, data_samples, **kwargs)
+        elif mode == 'predict':
+            return self.predict(inputs, data_samples, **kwargs)
+        elif mode == 'tensor':
+            return self._forward(inputs, data_samples, **kwargs)
+        else:
+            raise RuntimeError(f'Invalid mode "{mode}". '
+                               'Only supports loss, predict and tensor mode')
+
+    def _forward(self,
+                 batch_inputs_dict: dict,
+                 batch_data_samples: OptSampleList = None,
+                 **kwargs) -> Tuple[List[torch.Tensor]]:
+        """Network forward process. Usually includes backbone, neck and head
+        forward without any post-processing.
+
+        Args:
+            batch_inputs_dict (dict): The model input dict which include
+                'points', 'img' keys.
+
+                - points (list[torch.Tensor]): Point cloud of each sample.
+                - imgs (torch.Tensor, optional): Image of each sample.
+
+            batch_data_samples (List[:obj:`Det3DDataSample`]): The Data
+                Samples. It usually includes information such as
+                `gt_instance_3d`, `gt_panoptic_seg_3d` and `gt_sem_seg_3d`.
+
+        Returns:
+            tuple[list]: A tuple of features from ``bbox_head`` forward.
+        """
+        x = self.extract_feat(batch_inputs_dict, batch_data_samples)
+        results = self.bbox_head.forward(x)
+        return results
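A minimal stand-alone mirror of the three-mode dispatch in `forward` above (the return values are placeholders, purely illustrative of the control flow):

```python
def forward(inputs, data_samples=None, mode='tensor'):
    """Toy dispatch mirroring the unified entry point above."""
    if mode == 'loss':
        return {'loss': 0.0}        # stand-in for a dict of loss tensors
    elif mode == 'predict':
        return data_samples         # stand-in for post-processed samples
    elif mode == 'tensor':
        return inputs               # stand-in for raw head outputs
    raise RuntimeError(f'Invalid mode "{mode}".')

print(forward([1, 2], mode='tensor'))  # [1, 2]
```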
+    def add_pred_to_datasample(
+        self,
+        data_samples: SampleList,
+        data_instances_3d: Optional[InstanceList] = None,
+        data_instances_2d: Optional[InstanceList] = None,
+    ) -> SampleList:
+        """Convert results list to `Det3DDataSample`.
+
+        Subclasses could override it to be compatible with some
+        multi-modality 3D detectors.
+
+        Args:
+            data_samples (list[:obj:`Det3DDataSample`]): The input data.
+            data_instances_3d (list[:obj:`InstanceData`], optional): 3D
+                Detection results of each sample.
+            data_instances_2d (list[:obj:`InstanceData`], optional): 2D
+                Detection results of each sample.
+
+        Returns:
+            list[:obj:`Det3DDataSample`]: Detection results of the
+            input. Each Det3DDataSample usually contains
+            'pred_instances_3d'. And the ``pred_instances_3d`` normally
+            contains following keys.
+
+            - scores_3d (Tensor): Classification scores, has a shape
+              (num_instance, )
+            - labels_3d (Tensor): Labels of 3D bboxes, has a shape
+              (num_instances, ).
+            - bboxes_3d (Tensor): Contains a tensor with shape
+              (num_instances, C) where C >= 7.
+
+            When there are image predictions in some models, it should
+            contain `pred_instances`. And the ``pred_instances`` normally
+            contains following keys.
+
+            - scores (Tensor): Classification scores of image, has a shape
+              (num_instance, )
+            - labels (Tensor): Predicted labels of 2D bboxes, has a shape
+              (num_instances, ).
+            - bboxes (Tensor): Contains a tensor with shape
+              (num_instances, 4).
+        """
+
+        assert (data_instances_2d is not None) or \
+            (data_instances_3d is not None),\
+            'please pass at least one type of data instances'
+
+        if data_instances_2d is None:
+            data_instances_2d = [
+                InstanceData() for _ in range(len(data_instances_3d))
+            ]
+        if data_instances_3d is None:
+            data_instances_3d = [
+                InstanceData() for _ in range(len(data_instances_2d))
+            ]
+
+        for i, data_sample in enumerate(data_samples):
+            data_sample.pred_instances_3d = data_instances_3d[i]
+            data_sample.pred_instances = data_instances_2d[i]
+        return data_samples
diff --git a/embodiedscan/models/detectors/sparse_featfusion_single_stage.py b/models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_single_stage.py
similarity index 99%
rename from embodiedscan/models/detectors/sparse_featfusion_single_stage.py
rename to models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_single_stage.py
index fe04702..9cb617a 100644
--- a/embodiedscan/models/detectors/sparse_featfusion_single_stage.py
+++ b/models/EmbodiedScan/embodiedscan/models/detectors/sparse_featfusion_single_stage.py
@@ -12,14 +12,13 @@
     ME = None
     pass
 
-from mmengine.model import BaseModel
-from mmengine.structures import InstanceData
-
 from embodiedscan.registry import MODELS
 from embodiedscan.structures.bbox_3d import get_proj_mat_by_coord_type
 from embodiedscan.utils import ConfigType
 from embodiedscan.utils.typing_config import (ForwardResults, InstanceList,
                                               SampleList)
+from mmengine.model import BaseModel
+from mmengine.structures import InstanceData
 
 from ..layers.fusion_layers.point_fusion import (batch_point_sample,
                                                  point_sample)
diff --git a/models/EmbodiedScan/embodiedscan/models/layers/__init__.py b/models/EmbodiedScan/embodiedscan/models/layers/__init__.py
new file mode 100644
index 0000000..2df75e0
--- /dev/null
+++ b/models/EmbodiedScan/embodiedscan/models/layers/__init__.py
@@ -0,0 +1,8 @@
+from .box3d_nms import (aligned_3d_nms, box3d_multiclass_nms, circle_nms,
+                        nms_bev, nms_normal_bev)
+from .ground_transformer import SparseFeatureFusionTransformerDecoder
+
+__all__ = [
+    'SparseFeatureFusionTransformerDecoder', 'box3d_multiclass_nms',
+    'aligned_3d_nms', 'circle_nms', 'nms_bev', 'nms_normal_bev'
+]
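The `box3d_nms.py` module added next runs NMS independently per foreground class. A minimal sketch of the class-wise score thresholding that drives its main loop (scores below are made up; the last column is background):

```python
import torch

score_thr = 0.3
mlvl_scores = torch.tensor([[0.9, 0.1, 0.0],
                            [0.2, 0.7, 0.1],
                            [0.1, 0.1, 0.8]])  # (N, C + 1), background last
num_classes = mlvl_scores.shape[1] - 1
for cls_id in range(num_classes):
    keep = mlvl_scores[:, cls_id] > score_thr
    print(cls_id, keep.nonzero().flatten().tolist())
# 0 [0]
# 1 [1]
```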
diff --git a/models/EmbodiedScan/embodiedscan/models/layers/box3d_nms.py b/models/EmbodiedScan/embodiedscan/models/layers/box3d_nms.py
new file mode 100644
index 0000000..09e78b8
--- /dev/null
+++ b/models/EmbodiedScan/embodiedscan/models/layers/box3d_nms.py
@@ -0,0 +1,295 @@
+# Copyright (c) OpenMMLab. All rights reserved.
+from typing import Optional, Tuple
+
+import numba
+import numpy as np
+import torch
+from mmcv.ops import nms, nms_rotated
+from torch import Tensor
+
+
+def box3d_multiclass_nms(
+        mlvl_bboxes: Tensor,
+        mlvl_bboxes_for_nms: Tensor,
+        mlvl_scores: Tensor,
+        score_thr: float,
+        max_num: int,
+        cfg: dict,
+        mlvl_dir_scores: Optional[Tensor] = None,
+        mlvl_attr_scores: Optional[Tensor] = None,
+        mlvl_bboxes2d: Optional[Tensor] = None) -> Tuple[Tensor]:
+    """Multi-class NMS for 3D boxes. The IoU used for NMS is defined as the
+    2D IoU between BEV boxes.
+
+    Args:
+        mlvl_bboxes (Tensor): Multi-level boxes with shape (N, M).
+            M is the dimension of each box.
+        mlvl_bboxes_for_nms (Tensor): Multi-level boxes with shape (N, 5)
+            ([x1, y1, x2, y2, ry]). N is the number of boxes.
+            The coordinate system of the BEV boxes is counterclockwise.
+        mlvl_scores (Tensor): Multi-level scores with shape (N, C + 1).
+            N is the number of boxes. C is the number of classes.
+        score_thr (float): Score threshold to filter boxes with low
+            confidence.
+        max_num (int): Maximum number of boxes that will be kept.
+        cfg (dict): Configuration dict of NMS.
+        mlvl_dir_scores (Tensor, optional): Multi-level scores of direction
+            classifier. Defaults to None.
+        mlvl_attr_scores (Tensor, optional): Multi-level scores of attribute
+            classifier. Defaults to None.
+        mlvl_bboxes2d (Tensor, optional): Multi-level 2D bounding boxes.
+            Defaults to None.
+
+    Returns:
+        Tuple[Tensor]: Return results after nms, including 3D bounding boxes,
+        scores, labels, direction scores, attribute scores (optional) and
+        2D bounding boxes (optional).
+    """
+    # do multi-class nms
+    # the fg class id range: [0, num_classes - 1]
+    num_classes = mlvl_scores.shape[1] - 1
+    bboxes = []
+    scores = []
+    labels = []
+    dir_scores = []
+    attr_scores = []
+    bboxes2d = []
+    for i in range(0, num_classes):
+        # get bboxes and scores of this class
+        cls_inds = mlvl_scores[:, i] > score_thr
+        if not cls_inds.any():
+            continue
+
+        _scores = mlvl_scores[cls_inds, i]
+        _bboxes_for_nms = mlvl_bboxes_for_nms[cls_inds, :]
+
+        if cfg.use_rotate_nms:
+            nms_func = nms_bev
+        else:
+            nms_func = nms_normal_bev
+
+        selected = nms_func(_bboxes_for_nms, _scores, cfg.nms_thr)
+        _mlvl_bboxes = mlvl_bboxes[cls_inds, :]
+        bboxes.append(_mlvl_bboxes[selected])
+        scores.append(_scores[selected])
+        cls_label = mlvl_bboxes.new_full((len(selected), ),
+                                         i,
+                                         dtype=torch.long)
+        labels.append(cls_label)
+
+        if mlvl_dir_scores is not None:
+            _mlvl_dir_scores = mlvl_dir_scores[cls_inds]
+            dir_scores.append(_mlvl_dir_scores[selected])
+        if mlvl_attr_scores is not None:
+            _mlvl_attr_scores = mlvl_attr_scores[cls_inds]
+            attr_scores.append(_mlvl_attr_scores[selected])
+        if mlvl_bboxes2d is not None:
+            _mlvl_bboxes2d = mlvl_bboxes2d[cls_inds]
+            bboxes2d.append(_mlvl_bboxes2d[selected])
+
+    if bboxes:
+        bboxes = torch.cat(bboxes, dim=0)
+        scores = torch.cat(scores, dim=0)
+        labels = torch.cat(labels, dim=0)
+        if mlvl_dir_scores is not None:
+            dir_scores = torch.cat(dir_scores, dim=0)
+        if mlvl_attr_scores is not None:
+            attr_scores = torch.cat(attr_scores, dim=0)
+        if mlvl_bboxes2d is not None:
+            bboxes2d = torch.cat(bboxes2d, dim=0)
+        if bboxes.shape[0] > max_num:
+            _, inds = scores.sort(descending=True)
+            inds = inds[:max_num]
+            bboxes = bboxes[inds, :]
+            labels = labels[inds]
+            scores = scores[inds]
+            if mlvl_dir_scores is not None:
+                dir_scores = dir_scores[inds]
+            if mlvl_attr_scores is not None:
+                attr_scores = attr_scores[inds]
+            if mlvl_bboxes2d is not None:
+                bboxes2d = bboxes2d[inds]
+    else:
+        bboxes = mlvl_scores.new_zeros((0, mlvl_bboxes.size(-1)))
+        scores = mlvl_scores.new_zeros((0, ))
+        labels = mlvl_scores.new_zeros((0, ), dtype=torch.long)
+        if mlvl_dir_scores is not None:
+            dir_scores = mlvl_scores.new_zeros((0, ))
+        if mlvl_attr_scores is not None:
+            attr_scores = mlvl_scores.new_zeros((0, ))
+        if mlvl_bboxes2d is not None:
+            bboxes2d = mlvl_scores.new_zeros((0, 4))
+
+    results = (bboxes, scores, labels)
+
+    if mlvl_dir_scores is not None:
+        results = results + (dir_scores, )
+    if mlvl_attr_scores is not None:
+        results = results + (attr_scores, )
+    if mlvl_bboxes2d is not None:
+        results = results + (bboxes2d, )
+
+    return results
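The aligned-box NMS that follows relies on axis-aligned 3D IoU. A minimal sketch of that computation for a single pair of boxes (the box values are illustrative):

```python
import torch

def aabb_iou_3d(a, b):
    """IoU of two axis-aligned 3D boxes given as (x1, y1, z1, x2, y2, z2)."""
    lo = torch.maximum(a[:3], b[:3])
    hi = torch.minimum(a[3:], b[3:])
    inter = torch.clamp(hi - lo, min=0).prod()
    vol_a = (a[3:] - a[:3]).prod()
    vol_b = (b[3:] - b[:3]).prod()
    return inter / (vol_a + vol_b - inter)

a = torch.tensor([0., 0., 0., 2., 2., 2.])
b = torch.tensor([1., 1., 1., 3., 3., 3.])
print(aabb_iou_3d(a, b))  # tensor(0.0667): overlap 1 / union 15
```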
+def aligned_3d_nms(boxes: Tensor, scores: Tensor, classes: Tensor,
+                   thresh: float) -> Tensor:
+    """3D NMS for axis-aligned boxes.
+
+    Args:
+        boxes (Tensor): Aligned box with shape [N, 6].
+        scores (Tensor): Scores of each box.
+        classes (Tensor): Class of each box.
+        thresh (float): IoU threshold for NMS.
+
+    Returns:
+        Tensor: Indices of selected boxes.
+    """
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    z1 = boxes[:, 2]
+    x2 = boxes[:, 3]
+    y2 = boxes[:, 4]
+    z2 = boxes[:, 5]
+    area = (x2 - x1) * (y2 - y1) * (z2 - z1)
+    zero = boxes.new_zeros(1, )
+
+    score_sorted = torch.argsort(scores)
+    pick = []
+    while (score_sorted.shape[0] != 0):
+        last = score_sorted.shape[0]
+        i = score_sorted[-1]
+        pick.append(i)
+
+        xx1 = torch.max(x1[i], x1[score_sorted[:last - 1]])
+        yy1 = torch.max(y1[i], y1[score_sorted[:last - 1]])
+        zz1 = torch.max(z1[i], z1[score_sorted[:last - 1]])
+        xx2 = torch.min(x2[i], x2[score_sorted[:last - 1]])
+        yy2 = torch.min(y2[i], y2[score_sorted[:last - 1]])
+        zz2 = torch.min(z2[i], z2[score_sorted[:last - 1]])
+        classes1 = classes[i]
+        classes2 = classes[score_sorted[:last - 1]]
+        inter_l = torch.max(zero, xx2 - xx1)
+        inter_w = torch.max(zero, yy2 - yy1)
+        inter_h = torch.max(zero, zz2 - zz1)
+
+        inter = inter_l * inter_w * inter_h
+        iou = inter / (area[i] + area[score_sorted[:last - 1]] - inter)
+        iou = iou * (classes1 == classes2).float()
+        score_sorted = score_sorted[torch.nonzero(iou <= thresh,
+                                                  as_tuple=False).flatten()]
+
+    indices = boxes.new_tensor(pick, dtype=torch.long)
+    return indices
+
+
+@numba.jit(nopython=True)
+def circle_nms(dets: Tensor, thresh: float, post_max_size: int = 83) -> Tensor:
+    """Circular NMS.
+
+    An object is only counted as positive if no other center with a higher
+    confidence exists within a radius r, using a bird's-eye-view distance
+    metric.
+
+    Args:
+        dets (np.ndarray): Detection results with the shape of [N, 3],
+            where each row is (x, y, score).
+        thresh (float): Squared center-distance threshold.
+        post_max_size (int): Max number of predictions to be kept.
+            Defaults to 83.
+
+    Returns:
+        list[int]: Indexes of the detections to be kept.
+    """
+    x1 = dets[:, 0]
+    y1 = dets[:, 1]
+    scores = dets[:, 2]
+    order = scores.argsort()[::-1].astype(np.int32)  # highest->lowest
+    ndets = dets.shape[0]
+    suppressed = np.zeros((ndets), dtype=np.int32)
+    keep = []
+    for _i in range(ndets):
+        i = order[_i]  # start with the highest-scoring box
+        if suppressed[i] == 1:  # skip boxes already suppressed
+            continue
+        keep.append(i)
+        for _j in range(_i + 1, ndets):
+            j = order[_j]
+            if suppressed[j] == 1:
+                continue
+            # squared center distance between box i and box j
+            dist = (x1[i] - x1[j])**2 + (y1[i] - y1[j])**2
+
+            if dist <= thresh:
+                suppressed[j] = 1
+
+    if post_max_size < len(keep):
+        return keep[:post_max_size]
+
+    return keep
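A pure-NumPy mirror of the circular-NMS kernel above, useful for sanity checks without numba (the detections are illustrative; note `thresh` is compared against the squared distance, as in the kernel):

```python
import numpy as np

def circle_nms_py(dets, thresh):
    """dets is (N, 3) = (x, y, score); returns kept indices."""
    x, y, scores = dets[:, 0], dets[:, 1], dets[:, 2]
    order = scores.argsort()[::-1]
    suppressed = np.zeros(len(dets), dtype=bool)
    keep = []
    for idx in order:
        if suppressed[idx]:
            continue
        keep.append(int(idx))
        # suppress every center within sqrt(thresh) of the kept one
        suppressed |= (x - x[idx])**2 + (y - y[idx])**2 <= thresh
    return keep

dets = np.array([[0.0, 0.0, 0.9], [0.1, 0.0, 0.8], [5.0, 5.0, 0.7]])
print(circle_nms_py(dets, thresh=1.0))  # [0, 2]
```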
+ """ + assert boxes.size(1) == 5, 'Input boxes shape should be [N, 5]' + order = scores.sort(0, descending=True)[1] + if pre_max_size is not None: + order = order[:pre_max_size] + boxes = boxes[order].contiguous() + scores = scores[order] + + # xyxyr -> back to xywhr + # note: better skip this step before nms_bev call in the future + boxes = torch.stack( + ((boxes[:, 0] + boxes[:, 2]) / 2, (boxes[:, 1] + boxes[:, 3]) / 2, + boxes[:, 2] - boxes[:, 0], boxes[:, 3] - boxes[:, 1], boxes[:, 4]), + dim=-1) + + keep = nms_rotated(boxes, scores, thresh)[1] + keep = order[keep] + if post_max_size is not None: + keep = keep[:post_max_size] + return keep + + +# This function duplicates functionality of mmcv.ops.iou_3d.nms_normal_bev +# from mmcv<=1.5, but using cuda ops from mmcv.ops.nms.nms. +# Nms api will be unified in mmdetection3d one day. +def nms_normal_bev(boxes: Tensor, scores: Tensor, thresh: float) -> Tensor: + """Normal NMS function GPU implementation (for BEV boxes). The overlap of + two boxes for IoU calculation is defined as the exact overlapping area of + the two boxes WITH their yaw angle set to 0. + + Args: + boxes (Tensor): Input boxes with shape (N, 5). + scores (Tensor): Scores of predicted boxes with shape (N). + thresh (float): Overlap threshold of NMS. + + Returns: + Tensor: Remaining indices with scores in descending order. + """ + assert boxes.shape[1] == 5, 'Input boxes shape should be [N, 5]' + return nms(boxes[:, :-1], scores, thresh)[1] diff --git a/embodiedscan/models/layers/fusion_layers/__init__.py b/models/EmbodiedScan/embodiedscan/models/layers/fusion_layers/__init__.py similarity index 100% rename from embodiedscan/models/layers/fusion_layers/__init__.py rename to models/EmbodiedScan/embodiedscan/models/layers/fusion_layers/__init__.py diff --git a/embodiedscan/models/layers/fusion_layers/point_fusion.py b/models/EmbodiedScan/embodiedscan/models/layers/fusion_layers/point_fusion.py similarity index 99% rename from embodiedscan/models/layers/fusion_layers/point_fusion.py rename to models/EmbodiedScan/embodiedscan/models/layers/fusion_layers/point_fusion.py index b10f09e..2f97f89 100644 --- a/embodiedscan/models/layers/fusion_layers/point_fusion.py +++ b/models/EmbodiedScan/embodiedscan/models/layers/fusion_layers/point_fusion.py @@ -3,18 +3,17 @@ from typing import List, Optional, Tuple, Union import torch -from mmcv.cnn import ConvModule -from mmengine.model import BaseModule -from torch import Tensor -from torch import nn as nn -from torch.nn import functional as F - from embodiedscan.registry import MODELS from embodiedscan.structures.bbox_3d import (batch_points_cam2img, get_proj_mat_by_coord_type, points_cam2img, points_img2cam) from embodiedscan.structures.points import get_points_type from embodiedscan.utils import ConfigType +from mmcv.cnn import ConvModule +from mmengine.model import BaseModule +from torch import Tensor +from torch import nn as nn +from torch.nn import functional as F def apply_3d_transformation(pcd: Tensor, diff --git a/embodiedscan/models/layers/ground_transformer/__init__.py b/models/EmbodiedScan/embodiedscan/models/layers/ground_transformer/__init__.py similarity index 100% rename from embodiedscan/models/layers/ground_transformer/__init__.py rename to models/EmbodiedScan/embodiedscan/models/layers/ground_transformer/__init__.py diff --git a/embodiedscan/models/layers/ground_transformer/decoder.py b/models/EmbodiedScan/embodiedscan/models/layers/ground_transformer/decoder.py similarity index 99% rename from 
diff --git a/embodiedscan/models/layers/ground_transformer/decoder.py b/models/EmbodiedScan/embodiedscan/models/layers/ground_transformer/decoder.py
similarity index 99%
rename from embodiedscan/models/layers/ground_transformer/decoder.py
rename to models/EmbodiedScan/embodiedscan/models/layers/ground_transformer/decoder.py
index bf5b90e..751f161 100644
--- a/embodiedscan/models/layers/ground_transformer/decoder.py
+++ b/models/EmbodiedScan/embodiedscan/models/layers/ground_transformer/decoder.py
@@ -3,14 +3,13 @@
 
 import torch
 import torch.nn as nn
+from embodiedscan.utils import ConfigType, OptConfigType
 from mmcv.cnn import build_norm_layer
 from mmcv.cnn.bricks.transformer import FFN, MultiheadAttention
 from mmengine import ConfigDict
 from mmengine.model import BaseModule, ModuleList
 from torch import Tensor
 
-from embodiedscan.utils import ConfigType, OptConfigType
-
 try:
     from fairscale.nn.checkpoint import checkpoint_wrapper
 except Exception:
diff --git a/embodiedscan/models/losses/__init__.py b/models/EmbodiedScan/embodiedscan/models/losses/__init__.py
similarity index 100%
rename from embodiedscan/models/losses/__init__.py
rename to models/EmbodiedScan/embodiedscan/models/losses/__init__.py
diff --git a/embodiedscan/models/losses/chamfer_distance.py b/models/EmbodiedScan/embodiedscan/models/losses/chamfer_distance.py
similarity index 91%
rename from embodiedscan/models/losses/chamfer_distance.py
rename to models/EmbodiedScan/embodiedscan/models/losses/chamfer_distance.py
index 924d111..979b8a0 100644
--- a/embodiedscan/models/losses/chamfer_distance.py
+++ b/models/EmbodiedScan/embodiedscan/models/losses/chamfer_distance.py
@@ -2,13 +2,12 @@
 from typing import Optional, Tuple, Union
 
 import torch
+from embodiedscan.registry import MODELS
 from pytorch3d.transforms import euler_angles_to_matrix
 from torch import Tensor
 from torch import nn as nn
 from torch.nn.functional import l1_loss, mse_loss, smooth_l1_loss
 
-from embodiedscan.registry import MODELS
-
 
 def chamfer_distance(
     src: Tensor,
@@ -169,8 +168,9 @@
     assert len(
         bbox.shape
     ) == 2, 'bbox must be 2D tensor of shape (N, 6) or (N, 7) or (N, 9)'
+    device = bbox.device
     if bbox.shape[-1] == 6:
-        rot_mat = torch.eye(3, device=bbox.device).unsqueeze(0).repeat(
+        rot_mat = torch.eye(3, device=device).unsqueeze(0).repeat(
             bbox.shape[0], 1, 1)
     elif bbox.shape[-1] == 7:
         angles = bbox[:, 6:]
@@ -184,20 +184,20 @@
     centers = bbox[:, :3].unsqueeze(1).repeat(1, 8, 1)  # shape (N, 8, 3)
     half_sizes = bbox[:, 3:6].unsqueeze(1).repeat(1, 8,
                                                   1) / 2  # shape (N, 8, 3)
-    eight_corners_x = torch.tensor([1, 1, 1, 1, -1, -1, -1, -1],
-                                   device=bbox.device).unsqueeze(0).repeat(
-                                       bbox.shape[0], 1)  # shape (N, 8)
-    eight_corners_y = torch.tensor([1, 1, -1, -1, 1, 1, -1, -1],
-                                   device=bbox.device).unsqueeze(0).repeat(
-                                       bbox.shape[0], 1)  # shape (N, 8)
-    eight_corners_z = torch.tensor([1, -1, 1, -1, 1, -1, 1, -1],
-                                   device=bbox.device).unsqueeze(0).repeat(
-                                       bbox.shape[0], 1)  # shape (N, 8)
-    eight_corners = torch.stack(
-        (eight_corners_x, eight_corners_y, eight_corners_z),
-        dim=-1)  # shape (N, 8, 3)
-    eight_corners = eight_corners * half_sizes  # shape (N, 8, 3)
-    # rot_mat: (N, 3, 3), eight_corners: (N, 8, 3)
+    eight_corners = [
+        [0, 0, 0],
+        [1, 0, 0],
+        [1, 1, 0],
+        [0, 1, 0],
+        [0, 0, 1],
+        [1, 0, 1],
+        [1, 1, 1],
+        [0, 1, 1],
+    ]
+    eight_corners = torch.tensor(eight_corners,
+                                 device=device).float()  # shape (8, 3)
+    eight_corners = (1 - 2 * eight_corners) * half_sizes  # shape (N, 8, 3)
     rotated_corners = torch.matmul(eight_corners,
                                    rot_mat.transpose(1, 2))  # shape (N, 8, 3)
     return centers + rotated_corners
diff
--git a/embodiedscan/models/losses/match_cost.py b/models/EmbodiedScan/embodiedscan/models/losses/match_cost.py similarity index 99% rename from embodiedscan/models/losses/match_cost.py rename to models/EmbodiedScan/embodiedscan/models/losses/match_cost.py index 0ab46f8..0c7ae71 100644 --- a/embodiedscan/models/losses/match_cost.py +++ b/models/EmbodiedScan/embodiedscan/models/losses/match_cost.py @@ -2,11 +2,10 @@ from typing import Optional, Union import torch -from mmengine.structures import InstanceData -from torch import Tensor - from embodiedscan.registry import TASK_UTILS from embodiedscan.structures import EulerDepthInstance3DBoxes +from mmengine.structures import InstanceData +from torch import Tensor class BaseMatchCost: diff --git a/embodiedscan/models/losses/occ_loss.py b/models/EmbodiedScan/embodiedscan/models/losses/occ_loss.py similarity index 100% rename from embodiedscan/models/losses/occ_loss.py rename to models/EmbodiedScan/embodiedscan/models/losses/occ_loss.py diff --git a/embodiedscan/models/losses/reduce_loss.py b/models/EmbodiedScan/embodiedscan/models/losses/reduce_loss.py similarity index 100% rename from embodiedscan/models/losses/reduce_loss.py rename to models/EmbodiedScan/embodiedscan/models/losses/reduce_loss.py diff --git a/embodiedscan/models/losses/rotated_iou_loss.py b/models/EmbodiedScan/embodiedscan/models/losses/rotated_iou_loss.py similarity index 99% rename from embodiedscan/models/losses/rotated_iou_loss.py rename to models/EmbodiedScan/embodiedscan/models/losses/rotated_iou_loss.py index c992d7a..9bbf93f 100644 --- a/embodiedscan/models/losses/rotated_iou_loss.py +++ b/models/EmbodiedScan/embodiedscan/models/losses/rotated_iou_loss.py @@ -2,12 +2,11 @@ from typing import Optional import torch +from embodiedscan.registry import MODELS from mmcv.ops import diff_iou_rotated_3d from torch import Tensor from torch import nn as nn -from embodiedscan.registry import MODELS - from .reduce_loss import weighted_loss diff --git a/embodiedscan/models/necks/__init__.py b/models/EmbodiedScan/embodiedscan/models/necks/__init__.py similarity index 100% rename from embodiedscan/models/necks/__init__.py rename to models/EmbodiedScan/embodiedscan/models/necks/__init__.py diff --git a/embodiedscan/models/necks/channel_mapper.py b/models/EmbodiedScan/embodiedscan/models/necks/channel_mapper.py similarity index 100% rename from embodiedscan/models/necks/channel_mapper.py rename to models/EmbodiedScan/embodiedscan/models/necks/channel_mapper.py diff --git a/embodiedscan/models/necks/imvoxel_neck.py b/models/EmbodiedScan/embodiedscan/models/necks/imvoxel_neck.py similarity index 99% rename from embodiedscan/models/necks/imvoxel_neck.py rename to models/EmbodiedScan/embodiedscan/models/necks/imvoxel_neck.py index fd8bfb9..6e835da 100644 --- a/embodiedscan/models/necks/imvoxel_neck.py +++ b/models/EmbodiedScan/embodiedscan/models/necks/imvoxel_neck.py @@ -1,9 +1,8 @@ # Copyright (c) OpenMMLab. All rights reserved. 
+from embodiedscan.registry import MODELS from mmengine.model import BaseModule from torch import nn -from embodiedscan.registry import MODELS - @MODELS.register_module() class IndoorImVoxelNeck(BaseModule): diff --git a/embodiedscan/models/necks/mink_neck.py b/models/EmbodiedScan/embodiedscan/models/necks/mink_neck.py similarity index 99% rename from embodiedscan/models/necks/mink_neck.py rename to models/EmbodiedScan/embodiedscan/models/necks/mink_neck.py index f453516..652a6e9 100644 --- a/embodiedscan/models/necks/mink_neck.py +++ b/models/EmbodiedScan/embodiedscan/models/necks/mink_neck.py @@ -11,11 +11,10 @@ pass import torch +from embodiedscan.registry import MODELS from mmengine.model import BaseModule, bias_init_with_prob from torch import Tensor, nn -from embodiedscan.registry import MODELS - @MODELS.register_module() class MinkNeck(BaseModule): diff --git a/embodiedscan/models/task_modules/__init__.py b/models/EmbodiedScan/embodiedscan/models/task_modules/__init__.py similarity index 100% rename from embodiedscan/models/task_modules/__init__.py rename to models/EmbodiedScan/embodiedscan/models/task_modules/__init__.py diff --git a/embodiedscan/models/task_modules/anchor/__init__.py b/models/EmbodiedScan/embodiedscan/models/task_modules/anchor/__init__.py similarity index 100% rename from embodiedscan/models/task_modules/anchor/__init__.py rename to models/EmbodiedScan/embodiedscan/models/task_modules/anchor/__init__.py diff --git a/embodiedscan/models/task_modules/anchor/anchor_3d_generator.py b/models/EmbodiedScan/embodiedscan/models/task_modules/anchor/anchor_3d_generator.py similarity index 99% rename from embodiedscan/models/task_modules/anchor/anchor_3d_generator.py rename to models/EmbodiedScan/embodiedscan/models/task_modules/anchor/anchor_3d_generator.py index 275c2d8..4846492 100644 --- a/embodiedscan/models/task_modules/anchor/anchor_3d_generator.py +++ b/models/EmbodiedScan/embodiedscan/models/task_modules/anchor/anchor_3d_generator.py @@ -3,9 +3,8 @@ import mmengine import torch -from torch import Tensor - from embodiedscan.registry import TASK_UTILS +from torch import Tensor @TASK_UTILS.register_module() diff --git a/embodiedscan/models/task_modules/assigners/__init__.py b/models/EmbodiedScan/embodiedscan/models/task_modules/assigners/__init__.py similarity index 100% rename from embodiedscan/models/task_modules/assigners/__init__.py rename to models/EmbodiedScan/embodiedscan/models/task_modules/assigners/__init__.py diff --git a/embodiedscan/models/task_modules/assigners/hungarian_assigner.py b/models/EmbodiedScan/embodiedscan/models/task_modules/assigners/hungarian_assigner.py similarity index 99% rename from embodiedscan/models/task_modules/assigners/hungarian_assigner.py rename to models/EmbodiedScan/embodiedscan/models/task_modules/assigners/hungarian_assigner.py index 324ade0..09ab818 100644 --- a/embodiedscan/models/task_modules/assigners/hungarian_assigner.py +++ b/models/EmbodiedScan/embodiedscan/models/task_modules/assigners/hungarian_assigner.py @@ -10,13 +10,12 @@ from typing import List, Union import torch +from embodiedscan.registry import TASK_UTILS from mmdet.models.task_modules import AssignResult, BaseAssigner from mmengine import ConfigDict from mmengine.structures import InstanceData from torch import Tensor -from embodiedscan.registry import TASK_UTILS - try: from scipy.optimize import linear_sum_assignment except ImportError: diff --git a/embodiedscan/registry.py b/models/EmbodiedScan/embodiedscan/registry.py similarity index 100% rename from 
embodiedscan/registry.py
rename to models/EmbodiedScan/embodiedscan/registry.py
diff --git a/embodiedscan/structures/__init__.py b/models/EmbodiedScan/embodiedscan/structures/__init__.py
similarity index 96%
rename from embodiedscan/structures/__init__.py
rename to models/EmbodiedScan/embodiedscan/structures/__init__.py
index b770fe9..1626657 100644
--- a/embodiedscan/structures/__init__.py
+++ b/models/EmbodiedScan/embodiedscan/structures/__init__.py
@@ -9,5 +9,6 @@
     'BaseInstance3DBoxes', 'Box3DMode', 'Coord3DMode', 'EulerInstance3DBoxes',
     'EulerDepthInstance3DBoxes', 'get_box_type', 'get_proj_mat_by_coord_type',
     'limit_period', 'mono_cam_box2vis', 'points_cam2img', 'points_img2cam',
-    'rotation_3d_in_axis', 'rotation_3d_in_euler', 'xywhr2xyxyr'
+    'rotation_3d_in_axis', 'rotation_3d_in_euler', 'xywhr2xyxyr',
+    'Det3DDataSample'
 ]
diff --git a/embodiedscan/structures/bbox_3d/__init__.py b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/__init__.py
similarity index 100%
rename from embodiedscan/structures/bbox_3d/__init__.py
rename to models/EmbodiedScan/embodiedscan/structures/bbox_3d/__init__.py
diff --git a/embodiedscan/structures/bbox_3d/base_box3d.py b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/base_box3d.py
similarity index 99%
rename from embodiedscan/structures/bbox_3d/base_box3d.py
rename to models/EmbodiedScan/embodiedscan/structures/bbox_3d/base_box3d.py
index b01779e..9dd4065 100644
--- a/embodiedscan/structures/bbox_3d/base_box3d.py
+++ b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/base_box3d.py
@@ -4,9 +4,8 @@
 
 import numpy as np
 import torch
-from torch import Tensor
-
 from embodiedscan.structures.points.base_points import BasePoints
+from torch import Tensor
 
 from .utils import limit_period
diff --git a/embodiedscan/structures/bbox_3d/box_3d_mode.py b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/box_3d_mode.py
similarity index 100%
rename from embodiedscan/structures/bbox_3d/box_3d_mode.py
rename to models/EmbodiedScan/embodiedscan/structures/bbox_3d/box_3d_mode.py
diff --git a/embodiedscan/structures/bbox_3d/coord_3d_mode.py b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/coord_3d_mode.py
similarity index 99%
rename from embodiedscan/structures/bbox_3d/coord_3d_mode.py
rename to models/EmbodiedScan/embodiedscan/structures/bbox_3d/coord_3d_mode.py
index be9cb54..8485125 100644
--- a/embodiedscan/structures/bbox_3d/coord_3d_mode.py
+++ b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/coord_3d_mode.py
@@ -4,10 +4,9 @@
 
 import numpy as np
 import torch
-from torch import Tensor
-
 from embodiedscan.structures.points import (BasePoints, CameraPoints,
                                             DepthPoints, LiDARPoints)
+from torch import Tensor
 
 from .base_box3d import BaseInstance3DBoxes
 from .box_3d_mode import Box3DMode
diff --git a/embodiedscan/structures/bbox_3d/euler_box3d.py b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/euler_box3d.py
similarity index 100%
rename from embodiedscan/structures/bbox_3d/euler_box3d.py
rename to models/EmbodiedScan/embodiedscan/structures/bbox_3d/euler_box3d.py
diff --git a/embodiedscan/structures/bbox_3d/euler_depth_box3d.py b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/euler_depth_box3d.py
similarity index 100%
rename from embodiedscan/structures/bbox_3d/euler_depth_box3d.py
rename to models/EmbodiedScan/embodiedscan/structures/bbox_3d/euler_depth_box3d.py
diff --git a/embodiedscan/structures/bbox_3d/utils.py b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/utils.py
similarity index 99%
rename from embodiedscan/structures/bbox_3d/utils.py
rename to models/EmbodiedScan/embodiedscan/structures/bbox_3d/utils.py
index 9331efa..cb83312 100644
--- a/embodiedscan/structures/bbox_3d/utils.py
+++ b/models/EmbodiedScan/embodiedscan/structures/bbox_3d/utils.py
@@ -4,11 +4,10 @@
 
 import numpy as np
 import torch
+from embodiedscan.utils.array_converter import array_converter
 from pytorch3d.transforms import euler_angles_to_matrix
 from torch import Tensor
 
-from embodiedscan.utils.array_converter import array_converter
-
 
 @array_converter(apply_to=('val', ))
 def limit_period(val: Union[np.ndarray, Tensor],
diff --git a/embodiedscan/structures/ops/__init__.py b/models/EmbodiedScan/embodiedscan/structures/ops/__init__.py
similarity index 100%
rename from embodiedscan/structures/ops/__init__.py
rename to models/EmbodiedScan/embodiedscan/structures/ops/__init__.py
diff --git a/embodiedscan/structures/ops/box_np_ops.py b/models/EmbodiedScan/embodiedscan/structures/ops/box_np_ops.py
similarity index 99%
rename from embodiedscan/structures/ops/box_np_ops.py
rename to models/EmbodiedScan/embodiedscan/structures/ops/box_np_ops.py
index 568d518..5a2fe5a 100644
--- a/embodiedscan/structures/ops/box_np_ops.py
+++ b/models/EmbodiedScan/embodiedscan/structures/ops/box_np_ops.py
@@ -6,7 +6,6 @@
 
 import numba
 import numpy as np
-
 from embodiedscan.structures.bbox_3d import (limit_period, points_cam2img,
                                              rotation_3d_in_axis)
 
diff --git a/embodiedscan/structures/ops/iou3d_calculator.py b/models/EmbodiedScan/embodiedscan/structures/ops/iou3d_calculator.py
similarity index 99%
rename from embodiedscan/structures/ops/iou3d_calculator.py
rename to models/EmbodiedScan/embodiedscan/structures/ops/iou3d_calculator.py
index eefdbf2..9bfb02e 100644
--- a/embodiedscan/structures/ops/iou3d_calculator.py
+++ b/models/EmbodiedScan/embodiedscan/structures/ops/iou3d_calculator.py
@@ -1,9 +1,8 @@
 # Copyright (c) OpenRobotLab. All rights reserved.
import torch -from mmdet.structures.bbox import bbox_overlaps - from embodiedscan.registry import TASK_UTILS from embodiedscan.structures.bbox_3d import get_box_type +from mmdet.structures.bbox import bbox_overlaps @TASK_UTILS.register_module() diff --git a/embodiedscan/structures/ops/transforms.py b/models/EmbodiedScan/embodiedscan/structures/ops/transforms.py similarity index 100% rename from embodiedscan/structures/ops/transforms.py rename to models/EmbodiedScan/embodiedscan/structures/ops/transforms.py diff --git a/embodiedscan/structures/points/__init__.py b/models/EmbodiedScan/embodiedscan/structures/points/__init__.py similarity index 100% rename from embodiedscan/structures/points/__init__.py rename to models/EmbodiedScan/embodiedscan/structures/points/__init__.py diff --git a/embodiedscan/structures/points/base_points.py b/models/EmbodiedScan/embodiedscan/structures/points/base_points.py similarity index 99% rename from embodiedscan/structures/points/base_points.py rename to models/EmbodiedScan/embodiedscan/structures/points/base_points.py index 88e641a..32e741d 100644 --- a/embodiedscan/structures/points/base_points.py +++ b/models/EmbodiedScan/embodiedscan/structures/points/base_points.py @@ -5,10 +5,9 @@ import numpy as np import torch -from torch import Tensor - from embodiedscan.structures.bbox_3d.utils import (rotation_3d_in_axis, rotation_3d_in_euler) +from torch import Tensor class BasePoints: diff --git a/embodiedscan/structures/points/cam_points.py b/models/EmbodiedScan/embodiedscan/structures/points/cam_points.py similarity index 100% rename from embodiedscan/structures/points/cam_points.py rename to models/EmbodiedScan/embodiedscan/structures/points/cam_points.py diff --git a/embodiedscan/structures/points/depth_points.py b/models/EmbodiedScan/embodiedscan/structures/points/depth_points.py similarity index 100% rename from embodiedscan/structures/points/depth_points.py rename to models/EmbodiedScan/embodiedscan/structures/points/depth_points.py diff --git a/embodiedscan/structures/points/lidar_points.py b/models/EmbodiedScan/embodiedscan/structures/points/lidar_points.py similarity index 100% rename from embodiedscan/structures/points/lidar_points.py rename to models/EmbodiedScan/embodiedscan/structures/points/lidar_points.py diff --git a/embodiedscan/tutorial.ipynb b/models/EmbodiedScan/embodiedscan/tutorial.ipynb similarity index 99% rename from embodiedscan/tutorial.ipynb rename to models/EmbodiedScan/embodiedscan/tutorial.ipynb index aa7de6b..1f3dea8 100644 --- a/embodiedscan/tutorial.ipynb +++ b/models/EmbodiedScan/embodiedscan/tutorial.ipynb @@ -60,14 +60,14 @@ "# # If there are multiple composed dataset and multiple annotation files\n", "# # this requires 'path/to/dataset' contain the name of composed dataset.\n", "# explorer = EmbodiedScanExplorer(\n", - "# \tdataroot=['path/to/scannet', 'path/to/3rscan', 'path/to/matterport3d', 'path/to/arkitscenes'],\n", + "# \tdataroot=['path/to/scannet', 'path/to/3rscan', 'path/to/matterport3d'],\n", "# \tann_file=['path/to/train.pkl', 'path/to/val.pkl'],\n", "# \tverbose=True,\t# print log or not\n", "# )\n", "\n", "# or\n", "explorer = EmbodiedScanExplorer(\n", - "\tdata_root={'scannet' : 'demo/data/scannet', '3rscan' : 'demo/data/3rscan', 'matterport3d': 'demo/data/matterport3d', 'arkitscenes': 'demo/data/arkitscenes'},\n", + "\tdata_root={'scannet' : 'demo/data/scannet', '3rscan' : 'demo/data/3rscan', 'matterport3d': 'demo/data/matterport3d'},\n", "\tann_file=['demo/data/train.pkl', 'demo/data/val.pkl'],\n", 
"\tverbose=True,\t# print log or not\n", ")" diff --git a/embodiedscan/utils/__init__.py b/models/EmbodiedScan/embodiedscan/utils/__init__.py similarity index 100% rename from embodiedscan/utils/__init__.py rename to models/EmbodiedScan/embodiedscan/utils/__init__.py diff --git a/embodiedscan/utils/array_converter.py b/models/EmbodiedScan/embodiedscan/utils/array_converter.py similarity index 100% rename from embodiedscan/utils/array_converter.py rename to models/EmbodiedScan/embodiedscan/utils/array_converter.py diff --git a/embodiedscan/utils/default_color_map.py b/models/EmbodiedScan/embodiedscan/utils/default_color_map.py similarity index 100% rename from embodiedscan/utils/default_color_map.py rename to models/EmbodiedScan/embodiedscan/utils/default_color_map.py diff --git a/embodiedscan/utils/dist_utils.py b/models/EmbodiedScan/embodiedscan/utils/dist_utils.py similarity index 100% rename from embodiedscan/utils/dist_utils.py rename to models/EmbodiedScan/embodiedscan/utils/dist_utils.py diff --git a/embodiedscan/utils/line_mesh.py b/models/EmbodiedScan/embodiedscan/utils/line_mesh.py similarity index 100% rename from embodiedscan/utils/line_mesh.py rename to models/EmbodiedScan/embodiedscan/utils/line_mesh.py diff --git a/embodiedscan/utils/typing_config.py b/models/EmbodiedScan/embodiedscan/utils/typing_config.py similarity index 100% rename from embodiedscan/utils/typing_config.py rename to models/EmbodiedScan/embodiedscan/utils/typing_config.py diff --git a/embodiedscan/visualization/__init__.py b/models/EmbodiedScan/embodiedscan/visualization/__init__.py similarity index 100% rename from embodiedscan/visualization/__init__.py rename to models/EmbodiedScan/embodiedscan/visualization/__init__.py diff --git a/embodiedscan/visualization/color_selector.py b/models/EmbodiedScan/embodiedscan/visualization/color_selector.py similarity index 100% rename from embodiedscan/visualization/color_selector.py rename to models/EmbodiedScan/embodiedscan/visualization/color_selector.py diff --git a/embodiedscan/visualization/continuous_drawer.py b/models/EmbodiedScan/embodiedscan/visualization/continuous_drawer.py similarity index 64% rename from embodiedscan/visualization/continuous_drawer.py rename to models/EmbodiedScan/embodiedscan/visualization/continuous_drawer.py index 5843157..8c23fb2 100644 --- a/embodiedscan/visualization/continuous_drawer.py +++ b/models/EmbodiedScan/embodiedscan/visualization/continuous_drawer.py @@ -69,9 +69,6 @@ def begin(self): elif dataset == 'matterport3d': pcdpath = os.path.join(self.dir, building, 'region_segmentations', f'{region}.ply') - elif dataset == 'arkitscenes': - pcdpath = os.path.join(self.dir, building, region, - f'{region}_3dod_mesh.ply') else: self.demo = True self.drawed_boxes = [] @@ -124,9 +121,7 @@ def draw_next(self, vis): if 'depth_cam2img' in img: depth_intrinsic = img['depth_cam2img'] else: - depth_intrinsic = self.scene.get('depth_cam2img', None) - if depth_intrinsic is None: - depth_intrinsic = intrinsic + depth_intrinsic = self.scene['depth_cam2img'] depth_shift = 1000.0 if self.dataset == 'matterport3d': depth_shift = 4000.0 @@ -346,181 +341,3 @@ def close(self, vis): vis.clear_geometries() vis.destroy_window() vis.close() - - -class ContinuousPredictionOccupancyDrawer: - """Visualization tool for Continuous Occupancy Prediction task. - - This class serves as the API for visualizing Continuous 3D Object - Detection task. 
- - This class is used to render the model's Occupancy Prediction - since the model will have a separate prediction for each frame. - - Args: - dataset (str): Name of composed raw dataset, one of - scannet/3rscan/matterport3d. - dir (str): Root path of the dataset. - scene (dict): Annotation of the selected scene. - classes (list): Class information. - id_to_index (dict): Mapping class id to the index of class names. - color_selector (ColorMap): ColorMap for visualization. - start_idx (int) : Index of the frame which the task starts. - """ - - def __init__(self, dataset, dir, scene, classes, id_to_index, - color_selector, start_idx): - self.dir = dir - self.dataset = dataset - self.scene = scene - self.classes = classes - self.id_to_index = id_to_index - self.color_selector = color_selector - self.idx = start_idx - self.camera = None - - self.point_cloud_range = [ - -3.2, -3.2, -1.28 + 0.5, 3.2, 3.2, 1.28 + 0.5 - ] - self.occ_size = [40, 40, 16] - - self.visible_grid = np.zeros([len(self.scene['images'])] + - self.occ_size, - dtype=bool) - self.grid_size = 0.16 - self.points = [] - - self.vis = o3d.visualization.VisualizerWithKeyCallback() - self.vis.register_key_callback(262, self.draw_next) # Right Arrow - self.vis.register_key_callback(ord('D'), self.draw_next) - self.vis.register_key_callback(ord('N'), self.draw_next) - self.vis.register_key_callback(256, self.close) - - def begin(self): - """Some preparations before starting the rendering.""" - print('Loading RGB-D images...') - for image_idx, image in enumerate(self.scene['images']): - img_path = image['img_path'] - img_path = os.path.join(self.dir, - img_path[img_path.find('/') + 1:]) - depth_path = image['depth_path'] - depth_path = os.path.join(self.dir, - depth_path[depth_path.find('/') + 1:]) - rgb = cv2.imread(img_path)[:, :, ::-1] - depth = cv2.imread(depth_path, cv2.IMREAD_UNCHANGED) - depth = depth.astype(np.float32) / 1000.0 - height, width = rgb.shape[:2] - global2cam = np.linalg.inv( - self.scene['axis_align_matrix'] @ image['cam2global']) - cam2img = image['cam2img'] - - pred_occupancy = image['pred_occupancy'] # shape (40, 40, 16) - - x, y, z = np.meshgrid(np.arange(self.occ_size[0]), - np.arange(self.occ_size[1]), - np.arange(self.occ_size[2]), - indexing='ij') - x, y, z = x.flatten(), y.flatten(), z.flatten() - points_3d = np.stack([x, y, z], axis=-1).reshape( - -1, 3) * self.grid_size + np.array( - self.point_cloud_range[:3]) + self.grid_size / 2.0 - points_3d = np.concatenate( - [points_3d, np.ones( - (points_3d.shape[0], 1))], axis=-1).reshape(-1, 4) - points = (cam2img @ global2cam @ points_3d.T).T - ans = points[:, 2] > 0 - points = points / points[:, 2, None] - ans = ans & (points[:, 0] >= 0) & (points[:, 0] < width) & ( - points[:, 1] >= 0) & (points[:, 1] < height) - self.visible_grid[image_idx] = ans.reshape(self.occ_size) - if image_idx > 0: - self.visible_grid[image_idx] = np.logical_or( - self.visible_grid[image_idx], - self.visible_grid[image_idx - 1]) - - ans = self.visible_grid[image_idx].flatten() & ( - pred_occupancy.flatten() > 0) # - points_3d = points_3d[ans] - pred_occupancy = pred_occupancy.flatten()[ans] - res = np.zeros((points_3d.shape[0], 6)) - if len(points_3d) == 0: - self.points.append(res) - continue - res[:, :3] = points_3d[:, :3] - res[:, 3:] = [ - self.color_selector.get_color( - self.classes[self.id_to_index[label_id]]) - for label_id in pred_occupancy - ] - res[:, 3:] /= 255.0 - self.points.append(res) - - print('Press N/D/Right Arrow to draw next frame.') - print('Press Q to close 
the window and quit.') - print("When you've rendered a lot of frames, the exit can become", - 'very slow because the program needs time to free up space.') - print('You can also press Esc to close window immediately,', - 'which may result in a segmentation fault.') - - pcd = o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(self.points[-1][:, :3]) - pcd.colors = o3d.utility.Vector3dVector(self.points[-1][:, 3:]) - voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud( - pcd, voxel_size=self.grid_size) - frame = o3d.geometry.TriangleMesh.create_coordinate_frame() - self.vis.create_window() - self.vis.add_geometry(voxel_grid) - self.vis.add_geometry(frame) - ctr = self.vis.get_view_control() - self.view_param = ctr.convert_to_pinhole_camera_parameters() - self.voxel_grid = voxel_grid - self.draw_next(self.vis) - - def draw_next(self, vis): - """Render the next frame. - - Args: - vis (open3d.visualization.VisualizerWithKeyCallback): Visualizer. - """ - if self.idx >= len(self.scene['images']): - print('No more images') - return - - img = self.scene['images'][self.idx] - extrinsic = self.scene['axis_align_matrix'] @ img['cam2global'] - - pcd = o3d.geometry.PointCloud() - pcd.points = o3d.utility.Vector3dVector(self.points[self.idx][:, :3]) - pcd.colors = o3d.utility.Vector3dVector(self.points[self.idx][:, 3:]) - voxel_grid = o3d.geometry.VoxelGrid.create_from_point_cloud( - pcd, voxel_size=self.grid_size) - - if self.camera is not None: - cam_points = draw_camera(extrinsic, return_points=True) - self.camera.points = cam_points - vis.update_geometry(self.camera) - else: - self.camera = draw_camera(extrinsic) - vis.add_geometry(self.camera) - - self.voxel_grid.clear() - vis.update_geometry(self.voxel_grid) - vis.remove_geometry(self.voxel_grid) - vis.add_geometry(voxel_grid) - self.voxel_grid = voxel_grid - self.idx += 1 - ctr = vis.get_view_control() - ctr.convert_from_pinhole_camera_parameters(self.view_param) - vis.update_renderer() - vis.poll_events() - vis.run() - - def close(self, vis): - """Close the visualizer. - - Args: - vis (open3d.visualization.VisualizerWithKeyCallback): Visualizer. 
- """ - vis.clear_geometries() - vis.destroy_window() - vis.close() diff --git a/embodiedscan/visualization/default_color_map.py b/models/EmbodiedScan/embodiedscan/visualization/default_color_map.py similarity index 100% rename from embodiedscan/visualization/default_color_map.py rename to models/EmbodiedScan/embodiedscan/visualization/default_color_map.py diff --git a/embodiedscan/visualization/full_color_map.txt b/models/EmbodiedScan/embodiedscan/visualization/full_color_map.txt similarity index 100% rename from embodiedscan/visualization/full_color_map.txt rename to models/EmbodiedScan/embodiedscan/visualization/full_color_map.txt diff --git a/embodiedscan/visualization/img_drawer.py b/models/EmbodiedScan/embodiedscan/visualization/img_drawer.py similarity index 100% rename from embodiedscan/visualization/img_drawer.py rename to models/EmbodiedScan/embodiedscan/visualization/img_drawer.py diff --git a/embodiedscan/visualization/line_mesh.py b/models/EmbodiedScan/embodiedscan/visualization/line_mesh.py similarity index 100% rename from embodiedscan/visualization/line_mesh.py rename to models/EmbodiedScan/embodiedscan/visualization/line_mesh.py diff --git a/embodiedscan/visualization/utils.py b/models/EmbodiedScan/embodiedscan/visualization/utils.py similarity index 100% rename from embodiedscan/visualization/utils.py rename to models/EmbodiedScan/embodiedscan/visualization/utils.py diff --git a/embodiedscan/visualizer/README.md b/models/EmbodiedScan/embodiedscan/visualizer/README.md similarity index 100% rename from embodiedscan/visualizer/README.md rename to models/EmbodiedScan/embodiedscan/visualizer/README.md diff --git a/embodiedscan/visualizer/__init__.py b/models/EmbodiedScan/embodiedscan/visualizer/__init__.py similarity index 100% rename from embodiedscan/visualizer/__init__.py rename to models/EmbodiedScan/embodiedscan/visualizer/__init__.py diff --git a/embodiedscan/visualizer/base_visualizer.py b/models/EmbodiedScan/embodiedscan/visualizer/base_visualizer.py similarity index 99% rename from embodiedscan/visualizer/base_visualizer.py rename to models/EmbodiedScan/embodiedscan/visualizer/base_visualizer.py index a98f1e6..9b7ffab 100644 --- a/embodiedscan/visualizer/base_visualizer.py +++ b/models/EmbodiedScan/embodiedscan/visualizer/base_visualizer.py @@ -1,13 +1,11 @@ import os +from embodiedscan.registry import VISUALIZERS from mmengine.dist import master_only from mmengine.visualization import Visualizer -from embodiedscan.registry import VISUALIZERS - try: import open3d as o3d - from embodiedscan.visualization.utils import _9dof_to_box, nms_filter except ImportError: o3d = None diff --git a/models/EmbodiedScan/install.py b/models/EmbodiedScan/install.py new file mode 100644 index 0000000..e089333 --- /dev/null +++ b/models/EmbodiedScan/install.py @@ -0,0 +1,119 @@ +import argparse +import re +import subprocess +import sys + + +def run_subprocess(command): + try: + process = subprocess.Popen(command, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + universal_newlines=True) + + # Read output and error in real-time + for line in process.stdout: + print(line.strip()) + for line in process.stderr: + print(line.strip()) + + # Wait for the subprocess to finish + process.wait() + + # Get the return code + return_code = process.returncode + + if return_code != 0: + print(f'Command failed with return code {return_code}') + + except subprocess.CalledProcessError as e: + print(f'Command failed with return code {e.returncode}') + print('Error output:') + print(e.output.decode()) + + 
+def pytorch3d_links():
+    try:
+        import torch
+    except ImportError as e:
+        print('Pytorch is not installed.')
+        raise e
+    cuda_version = torch.version.cuda
+    if cuda_version is None:
+        print('Pytorch is cpu only.')
+        raise NotImplementedError
+
+    pyt_version_str = torch.__version__.split('+')[0].replace('.', '')
+    cuda_version_str = torch.version.cuda.replace('.', '')
+    version_str = ''.join([
+        f'py3{sys.version_info.minor}_cu', cuda_version_str,
+        f'_pyt{pyt_version_str}'
+    ])
+    pytorch3d_links = f'https://dl.fbaipublicfiles.com/pytorch3d/packaging/wheels/{version_str}/download.html'  # noqa: E501
+    return pytorch3d_links
+
+
+def mmcv_links():
+    try:
+        import torch
+    except ImportError as e:
+        print('Pytorch is not installed.')
+        raise e
+    cuda_version = torch.version.cuda
+    if cuda_version is None:
+        print('Pytorch is cpu only.')
+        raise NotImplementedError
+
+    cuda_version_str = torch.version.cuda.replace('.', '')
+    pyt_version = torch.__version__.split('+')[0].split('.')
+    pyt_version_mmcv = pyt_version[0] + '.' + pyt_version[1]
+    mmcv_links = f'https://download.openmmlab.com/mmcv/dist/cu{cuda_version_str}/torch{pyt_version_mmcv}/index.html'  # noqa: E501
+    return mmcv_links
+
+
+def install_package(line):
+    pat = '(' + '|'.join(['>=', '==', '>', '<', '<=', '@']) + ')'
+    parts = re.split(pat, line, maxsplit=1)
+    package_name = parts[0].strip()
+    print('installing', package_name)
+    if package_name == 'pytorch3d':
+        links = pytorch3d_links()
+        run_subprocess(
+            [sys.executable, '-m', 'pip', 'install', 'pytorch3d', '-f', links])
+    elif package_name == 'mmcv':
+        links = mmcv_links()
+        print(links)
+        run_subprocess(
+            [sys.executable, '-m', 'pip', 'install', line, '-f', links])
+    elif package_name == 'MinkowskiEngine':
+        run_subprocess([sys.executable, '-m', 'pip', 'install', 'ninja'])
+        run_subprocess([
+            sys.executable, '-m', 'pip', 'install', '-U',
+            'git+https://github.com/NVIDIA/MinkowskiEngine', '--no-deps'
+        ])  # noqa: E501
+    else:
+        run_subprocess([sys.executable, '-m', 'pip', 'install', line])
+
+
+def install_requires(fname):
+    with open(fname, 'r') as f:
+        for line in f.readlines():
+            line = line.strip()
+            if line:
+                install_package(line)
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser(
+        description='Install EmbodiedScan from pre-built package.')
+    parser.add_argument('mode', nargs='?', default=None)
+    args = parser.parse_args()
+
+    install_requires('requirements/base.txt')
+    if args.mode == 'visual' or args.mode == 'all':
+        install_requires('requirements/visual.txt')
+
+    if args.mode == 'run' or args.mode == 'all':
+        install_requires('requirements/run.txt')
+
+    run_subprocess([sys.executable, '-m', 'pip', 'install', '-e', '.'])
diff --git a/models/EmbodiedScan/lry_utils/utils_read.py b/models/EmbodiedScan/lry_utils/utils_read.py
new file mode 100644
index 0000000..c322acd
--- /dev/null
+++ b/models/EmbodiedScan/lry_utils/utils_read.py
@@ -0,0 +1,572 @@
+import json
+import os
+
+import cv2
+import numpy as np
+from tqdm import tqdm
+
+EXCLUDED_OBJECTS = ['wall', 'ceiling', 'floor']
+
+
+def reverse_multi2multi_mapping(mapping):
+    """
+    Args:
+        mapping: dict in format key1:[value1, value2], key2:[value2, value3]
+    Returns:
+        mapping: dict in format value1:[key1], value2:[key1, key2], value3:[key2]
+    """
+    output = {}
+    possible_values = []
+    for key, values in mapping.items():
+        for value in values:
+            possible_values.append(value)
+    possible_values = list(set(possible_values))
+    for value in possible_values:
+        output[value] = []
+    for key, values in mapping.items():
+        for value in values:
+            output[value].append(key)
+    return output
+
+
+def reverse_121_mapping(mapping):
+    """Reverse a 1-to-1 mapping.
+
+    Args:
+        mapping: dict in format key1:value1, key2:value2
+    Returns:
+        mapping: dict in format value1:key1, value2:key2
+    """
+    return {v: k for k, v in mapping.items()}
+
+
+def load_json(path):
+    if os.path.getsize(path) == 0:
+        return None
+    with open(path, 'r', encoding='utf-8') as f:
+        data = json.load(f)
+    return data
+
+
+def read_extrinsic_dir(directory):
+    """
+    Returns:
+        extrinsics: numpy array of extrinsic matrices, shape (N, 4, 4)
+        ids: list of ids (str) of matrix files.
+    """
+    extrinsics = []
+    ids = []
+    for file in os.listdir(directory):
+        if file.endswith('.txt') or file.endswith('.npy'):
+            if file.startswith('depth_intrinsic') or file.startswith(
+                    'intrinsic'):
+                continue
+            path = os.path.join(directory, file)
+            extrinsics.append(read_extrinsic(path))
+            path = path.replace('\\', '/')
+            ids.append(file.split('.')[0])
+    return extrinsics, ids
+
+
+def _pad_extrinsic(mat):
+    """transforms the extrinsic matrix to the 4x4 form."""
+    mat = np.array(mat)
+    if mat.shape == (3, 4):
+        mat = np.vstack((mat, [0, 0, 0, 1]))
+    elif mat.shape != (4, 4):
+        raise ValueError('Invalid shape of matrix.')
+    return mat
+
+
+def read_extrinsic(path):
+    """returns a 4x4 numpy array of the extrinsic matrix."""
+    if path.endswith('.txt'):
+        mat = np.loadtxt(path)
+        return _pad_extrinsic(mat)
+    elif path.endswith('.npy'):
+        mat = np.load(path)
+        return _pad_extrinsic(mat)
+    else:
+        raise ValueError('Invalid file extension.')
+
+
+def _read_intrinsic_mp3d(path):
+    a = np.loadtxt(path)
+    intrinsic = np.identity(4, dtype=float)
+    intrinsic[0][0] = a[2]  # fx
+    intrinsic[1][1] = a[3]  # fy
+    intrinsic[0][2] = a[4]  # cx
+    intrinsic[1][2] = a[5]  # cy
+    # a[0], a[1] are the width and height of the image
+    return intrinsic
+
+
+def _read_intrinsic_scannet(path):
+    intrinsic = np.loadtxt(path)
+    return intrinsic
+
+
+def read_intrinsic(path, mode='scannet'):
+    """Reads intrinsic matrix from file.
+
+    Returns:
+        extended intrinsic of shape (4, 4)
+    """
+    if mode == 'scannet':
+        return _read_intrinsic_scannet(path)
+    elif mode == 'mp3d':
+        return _read_intrinsic_mp3d(path)
+    else:
+        raise ValueError('Invalid mode {}.'.format(mode))
+
+
+def _read_axis_align_matrix_scannet(path):
+    with open(path, 'r') as file:
+        first_line = file.readline()
+    vals = first_line.strip().split(' ')[2:]
+    vals = np.array(vals, dtype=np.float64)
+    output = vals.reshape(4, 4)
+    return output
+
+
+def read_axis_align_matrix(path, mode):
+    if mode == 'scannet':
+        return _read_axis_align_matrix_scannet(path)
+    else:
+        raise ValueError('Invalid mode {}.'.format(mode))
+
+
+def read_depth_map(path):
+    """Reads depth map from file.
+
+    Returns:
+        depth: numpy array of depth values, shape (H, W)
+    """
+    if '3rscan' in path:
+        path = path[:-4] + '.pgm'
+    depth_map = cv2.imread(path, cv2.IMREAD_UNCHANGED)
+    if depth_map is None:
+        raise ValueError(f'Cannot read file {path}')
+    depth_map = depth_map / 1000.0  # avoid in-place '/=': the loaded array has an integer dtype
+ if 'matterport' in path or 'mp3d' in path: + depth_map /= 4.0 # for matterport, depth should be divided by 4000 + return depth_map + + +def read_bboxes_json(path, return_id=False, return_type=False): + """ + Returns: + boxes: numpy array of bounding boxes, shape (M, 9): xyz, lwh, ypr + ids: (optional) numpy array of obj ids, shape (M,) + types: (optional) list of strings, each string is a type of object + """ + with open(path, 'r') as f: + bboxes_json = json.load(f) + boxes = [] + ids = [] + types = [] + for i in range(len(bboxes_json)): + if bboxes_json[i]['obj_type'] in EXCLUDED_OBJECTS: + continue + box = bboxes_json[i]['psr'] + position = np.array( + [box['position']['x'], box['position']['y'], box['position']['z']]) + size = np.array( + [box['scale']['x'], box['scale']['y'], box['scale']['z']]) + euler_angles = np.array( + [box['rotation']['x'], box['rotation']['y'], box['rotation']['z']]) + boxes.append(np.concatenate([position, size, euler_angles])) + ids.append(int(bboxes_json[i]['obj_id'])) + types.append(bboxes_json[i]['obj_type']) + boxes = np.array(boxes) + if return_id and return_type: + ids = np.array(ids) + return boxes, ids, types + if return_id: + ids = np.array(ids) + return boxes, ids + if return_type: + return boxes, types + return boxes + + +def get_scene_prefix(path): + if '3rscan' in path: + return '3rscan' + elif 'matterport' in path or 'mp3d' in path: + return 'matterport3d' + elif 'scene' in path: + return 'scannet' + else: + return '' + + +def read_type2int(path): + with open(path, 'rb') as f: + data = np.load(f, allow_pickle=True) + metainfo = data['metainfo'] + object_type_to_int = metainfo['categories'] + return object_type_to_int + + +def apply_mapping_to_keys(d, mappings): + """ + Args: + d: a dictionary + mappings: dictionary(s) of mappings, e.g. {"old_key1": "new_key1", "old_key2": "new_key2"} + Returns: + a new dictionary with keys changed according to mappings + """ + if not isinstance(mappings, list): + mappings = [mappings] + for mapping in mappings: + d = {mapping.get(k, k): v for k, v in d.items()} + return d + + +def read_annotation_pickle(path, show_progress=True): + """ + Returns: A dictionary. Format. 
scene_id : (bboxes, object_ids, object_types, visible_view_object_dict, extrinsics_c2w, axis_align_matrix, intrinsics, image_paths) + bboxes: numpy array of bounding boxes, shape (N, 9): xyz, lwh, ypr + object_ids: numpy array of obj ids, shape (N,) + object_types: list of strings, each string is a type of object + visible_view_object_dict: a dictionary {view_id: visible_instance_ids} + extrinsics_c2w: a list of 4x4 matrices, each matrix is the extrinsic matrix of a view + axis_align_matrix: a 4x4 matrix, the axis-aligned matrix of the scene + intrinsics: a list of 4x4 matrices, each matrix is the intrinsic matrix of a view + image_paths: a list of strings, each string is the path of an image in the scene + """ + with open(path, 'rb') as f: + data = np.load(f, allow_pickle=True) + metainfo = data['metainfo'] + object_type_to_int = metainfo['categories'] + object_int_to_type = {v: k for k, v in object_type_to_int.items()} + datalist = data['data_list'] + output_data = {} + pbar = tqdm(range(len(datalist))) if show_progress else range( + len(datalist)) + for scene_idx in pbar: + images = datalist[scene_idx]['images'] + intrinsic = datalist[scene_idx].get('cam2img', None) # a 4x4 matrix + missing_intrinsic = False + if intrinsic is None: + missing_intrinsic = True # each view has different intrinsic for mp3d + depth_intrinsic = datalist[scene_idx].get( + 'cam2depth', None) # a 4x4 matrix, for 3rscan + if depth_intrinsic is None and not missing_intrinsic: + depth_intrinsic = datalist[scene_idx][ + 'depth2img'] # a 4x4 matrix, for scannet + axis_align_matrix = datalist[scene_idx][ + 'axis_align_matrix'] # a 4x4 matrix + scene_id = images[0]['img_path'].split('/')[-2] # str + + instances = datalist[scene_idx]['instances'] + bboxes = [] + object_ids = [] + object_types = [] + object_type_ints = [] + for object_idx in range(len(instances)): + bbox_3d = instances[object_idx]['bbox_3d'] # list of 9 values + bbox_label_3d = instances[object_idx]['bbox_label_3d'] # int + bbox_id = instances[object_idx]['bbox_id'] # int + object_type = object_int_to_type[bbox_label_3d] + # if object_type in EXCLUDED_OBJECTS: + # continue + object_type_ints.append(bbox_label_3d) + object_types.append(object_type) + bboxes.append(bbox_3d) + object_ids.append(bbox_id) + bboxes = np.array(bboxes) + object_ids = np.array(object_ids) + object_type_ints = np.array(object_type_ints) + + visible_view_object_dict = {} + extrinsics_c2w = [] + intrinsics = [] + depth_intrinsics = [] + image_paths = [] + for image_idx in range(len(images)): + img_path = images[image_idx]['img_path'] # str + if len(img_path.split('/')) == 3: # should be 4, add prefix + # example input: posed_images/3rscan0001/000000.jpg + # example output: 3rscan/posed_images/3rscan0001/000000.jpg + scene_prefix = get_scene_prefix(img_path) + img_path = os.path.join(scene_prefix, img_path) + extrinsic_id = img_path.split('/')[-1].split('.')[0] # str + cam2global = images[image_idx]['cam2global'] # a 4x4 matrix + if missing_intrinsic: + intrinsic = images[image_idx]['cam2img'] + depth_intrinsic = images[image_idx]['cam2depth'] + visible_instance_indices = images[image_idx][ + 'visible_instance_ids'] # numpy array of int + visible_instance_ids = object_ids[visible_instance_indices] + visible_view_object_dict[extrinsic_id] = visible_instance_ids + extrinsics_c2w.append(cam2global) + intrinsics.append(intrinsic) + depth_intrinsics.append(depth_intrinsic) + image_paths.append(img_path) + if show_progress: + pbar.set_description(f'Processing scene {scene_id}') + 
output_data[scene_id] = { + 'bboxes': bboxes, + 'object_ids': object_ids, + 'object_types': object_types, + 'object_type_ints': object_type_ints, + 'visible_view_object_dict': visible_view_object_dict, + 'extrinsics_c2w': extrinsics_c2w, + 'axis_align_matrix': axis_align_matrix, + 'intrinsics': intrinsics, + 'depth_intrinsics': depth_intrinsics, + 'image_paths': image_paths, + } + return output_data + + +def read_annotation_pickles(paths): + """Read multiple annotation pickles and merge them into one dictionary. + + Args: + paths: a list of paths to annotation pickles. + Returns: Please refer to the return value of read_annotation_pickle() + """ + output_data = {} + if isinstance(paths, str): + paths = [paths] + for path in paths: + data = read_annotation_pickle(path) + output_data.update(data) + output_data = dict(sorted(output_data.items())) + return output_data + + +def read_scene_id_mapping(mode): + assert mode in ['mp3d', '3rscan'] # scannet do not need this mapping + fname = f'/mnt/petrelfs/linjingli/mmscan_modelzoo-main/embodiedscan_infos/{mode}_mapping.json' + + if not os.path.exists(fname): + print(f'Warning: cannot find {fname}') + return {} + with open(fname, 'r') as f: + mapping = json.load(f) + return mapping + + +RAW2NUM_3RSCAN = read_scene_id_mapping('3rscan') +NUM2RAW_3RSCAN = {v: k for k, v in RAW2NUM_3RSCAN.items()} +RAW2NUM_MP3D = read_scene_id_mapping('mp3d') +NUM2RAW_MP3D = {v: k for k, v in RAW2NUM_MP3D.items()} + + +def is_valid_name(name): + is_scannet = 'scene' in name or 'scannet' in name + is_3rscan = '3rscan' in name + is_mp3d = 'mp3d' in name or 'matterport' in name + is_valid = is_scannet + is_3rscan + is_mp3d == 1 + if not is_valid: + print(f'Invalid name {name}') + return is_valid + + +def is_sample_idx(name): + if not is_valid_name(name): + return False + length = len(name.split('/')) + return length >= 2 + + +def is_scene_id(name): + if not is_valid_name(name): + return False + length = len(name.split('/')) + return length == 1 + + +def sample_idx_to_scene_id(sample_idx): + """sample index follows the "raw" rule, directly downloaded from the + internet. + + scene_id follows the "num"bered rule, used in the dataset. 
+ """ + is_scannet = 'scannet' in sample_idx + is_3rscan = '3rscan' in sample_idx + is_mp3d = 'mp3d' in sample_idx or 'matterport' in sample_idx + assert is_scannet + is_3rscan + is_mp3d == 1, f'Invalid sample_idx {sample_idx}' + if is_scannet: + scene_id = sample_idx.split('/')[-1] + elif is_3rscan: + raw_id = sample_idx.split('/')[-1] + scene_id = RAW2NUM_3RSCAN[raw_id] + elif is_mp3d: + _, raw_id, region_id = sample_idx.split('/') + scene_id = RAW2NUM_MP3D[raw_id] + scene_id = f'{scene_id}_{region_id}' + return scene_id + + +def scene_id_to_sample_idx(scene_id): + is_scannet = 'scene' in scene_id + is_3rscan = '3rscan' in scene_id + is_mp3d = 'mp3d' in scene_id + assert is_scannet + is_3rscan + is_mp3d == 1, f'Invalid scene_id {scene_id}' + if is_scannet: + sample_idx = f'scannet/{scene_id}' + elif is_3rscan: + raw_id = NUM2RAW_3RSCAN[scene_id] + sample_idx = f'3rscan/{raw_id}' + elif is_mp3d: + scene_id, region_id = scene_id.split('_region') + raw_id = NUM2RAW_MP3D[scene_id] + sample_idx = f'matterport3d/{raw_id}/region{region_id}' + return sample_idx + + +def to_scene_id(name): + return name if is_scene_id(name) else sample_idx_to_scene_id(name) + + +def to_sample_idx(name): + return name if is_sample_idx(name) else scene_id_to_sample_idx(name) + + +def read_es_info(path, show_progress=False, count_type_from_zero=False): + data = np.load(path, allow_pickle=True) + data_list = data['data_list'] + object_type_to_int = data['metainfo']['categories'] + object_int_to_type = {v: k for k, v in object_type_to_int.items()} + output_data = {} + pbar = tqdm(data_list) if show_progress else data_list + for data in pbar: + if 'sample_idx' in data: + sample_idx = data['sample_idx'] + scene_id = sample_idx_to_scene_id(sample_idx) + else: + scene_id = data['images'][0]['img_path'].split('/')[-2] # str + sample_idx = scene_id_to_sample_idx(scene_id) + bboxes, object_ids, object_types_int, object_types = [], [], [], [] + for inst in data.get('instances', []): + bbox_label_3d = inst['bbox_label_3d'] + object_type = object_int_to_type[bbox_label_3d] + bbox_label_3d -= 1 if count_type_from_zero else 0 + bboxes.append(inst['bbox_3d']) + object_ids.append(inst['bbox_id']) + object_types_int.append(bbox_label_3d) + object_types.append(object_type) + + bboxes = np.array(bboxes) + object_ids = np.array(object_ids) + object_types_int = np.array(object_types_int) + + output_data[scene_id] = { + 'scene_id': scene_id, + 'sample_idx': sample_idx, + 'bboxes': bboxes, + 'object_ids': object_ids, + 'object_types': object_types, + 'object_type_ints': object_types_int, + 'axis_align_matrix': data.get('axis_align_matrix', None), + } + return output_data + + +def read_es_infos(paths, show_progress=False, count_type_from_zero=False): + output_data = {} + if isinstance(paths, str): + paths = [paths] + for path in paths: + data = read_es_info(path, show_progress, count_type_from_zero) + output_data.update(data) + return output_data + + +def to_list_of_int(x): + # x may be a single int, or a list of str(int), or a list of int + if isinstance(x, int): + return [x] + elif isinstance(x, str): + return [int(x)] + elif isinstance(x, list): + return [int(i) for i in x] + else: + raise ValueError(f'Invalid input {x} of type {type(x)}') + + +def to_list_of_str(x): + # x may be a single str, or a list of str, or a list of int + if isinstance(x, str): + return [x] + elif isinstance(x, int): + return [str(x)] + elif isinstance(x, list): + return [str(i) for i in x] + else: + raise ValueError(f'Invalid input {x} of type {type(x)}') + + 
+def load_vg_data(path, all_es_info):
+    if isinstance(all_es_info, str):
+        all_es_info = read_es_info(all_es_info)
+    with open(path, 'r') as f:
+        raw_vg_data = json.load(f)
+    # Example of one dict in the VG annotation file: {"sub_class": "VG_Direct_Attribute_O_Common", "scan_id": "1mp3d_0000_region7", "target_id": [5, 3, 16, 4, 27, 9, 34, 38, 44, 17, 20, 29, 35], "distractor_ids": [], "text": "Find all the items with furniture coarse grained category in the room.", "target": ["rail", "stairs", "ladder", "shelf", "desk", "window", "object", "object", "object", "chair", "bench", "chair", "object"], "anchors": [], "anchor_ids": [], "tokens_positive": {"3": [[13, 57]], "4": [[13, 57]], "5": [[13, 57]], "9": [[13, 57]], "16": [[13, 57]], "17": [[13, 57]], "20": [[13, 57]], "27": [[13, 57]], "29": [[13, 57]], "34": [[13, 57]], "35": [[13, 57]], "38": [[13, 57]], "44": [[13, 57]]}, "ID": "VG_Direct_Attribute_O_Common_0"}
+    vg_data = []
+    for i, raw_vg in enumerate(raw_vg_data):
+        scan_id = raw_vg['scan_id']
+        if scan_id not in all_es_info:
+            continue
+        es_info = all_es_info[scan_id]
+        bboxes = es_info['bboxes']
+        object_ids = list(es_info['object_ids'])
+        object_types = es_info['object_types']
+
+        item_id = raw_vg['ID']
+        sub_class = raw_vg['sub_class'].lower()
+        space = 'space' in sub_class
+        attribute = 'attribute' in sub_class
+        assert space + attribute == 1, f'Invalid sub_class {sub_class}, should be space or attribute'
+        indirect = 'indirect' in sub_class
+        direct = not indirect
+        assert 'direct' in sub_class, f'Invalid sub_class {sub_class}, should contain the word direct'
+        target_ids = to_list_of_int(raw_vg['target_id'])
+        target_idxs = [object_ids.index(i) for i in target_ids]
+        target_bboxes = bboxes[target_idxs]
+        multi = len(target_ids) > 1
+        distractor_ids = to_list_of_int(raw_vg['distractor_ids'])
+        distractor_idxs = [object_ids.index(i) for i in distractor_ids]
+        hard = len(distractor_ids) >= 3
+        text = raw_vg['text']
+        targets = to_list_of_str(raw_vg['target'])
+        anchors = to_list_of_str(raw_vg['anchors'])
+        anchor_ids = to_list_of_int(raw_vg['anchor_ids'])
+        anchor_idxs = [object_ids.index(i) for i in anchor_ids]
+        tokens_positive = raw_vg['tokens_positive']
+        # tokens_positive = {int(k): v for k, v in tokens_positive.items()}
+
+        data_dict = {
+            'scan_id': scan_id,
+            'item_id': item_id,
+            'space': space,
+            'direct': direct,
+            'multi': multi,
+            'hard': hard,
+            'sub_class': sub_class,
+            'target_ids': target_ids,
+            'target_idxs': target_idxs,
+            'target_bboxes': target_bboxes,
+            'distractor_ids': distractor_ids,
+            'distractor_idxs': distractor_idxs,
+            'text': text,
+            'targets': targets,
+            'anchors': anchors,
+            'anchor_ids': anchor_ids,
+            'anchor_idxs': anchor_idxs,
+            'tokens_positive': tokens_positive,
+        }
+        vg_data.append(data_dict)
+    del raw_vg_data
+    return vg_data
+
+
+if __name__ == '__main__':
+    # pickle_file = r"D:\Projects\shared_data\embodiedscan_infos\competition_ver\embodiedscan_infos_val.pkl"
+    pickle_file = r'D:\Projects\shared_data\embodiedscan_infos\embodiedscan_infos_val_full.pkl'
+    read_es_infos(pickle_file)
+    # read_annotation_pickle(pickle_file)
diff --git a/models/EmbodiedScan/requirements/base.txt b/models/EmbodiedScan/requirements/base.txt
new file mode 100644
index 0000000..9283454
--- /dev/null
+++ b/models/EmbodiedScan/requirements/base.txt
@@ -0,0 +1,5 @@
+mmengine
+numpy==1.23.5
+opencv-python
+torch
+tqdm
diff --git a/models/EmbodiedScan/requirements/run.txt b/models/EmbodiedScan/requirements/run.txt
new file mode 100644
index 0000000..8a7390b
--- /dev/null
+++ b/models/EmbodiedScan/requirements/run.txt
@@ -0,0 +1,7 @@
+
+mmcv==2.0.0rc4
+mmdet
+mmengine
+ninja
+pytorch3d
+transformers
diff --git a/requirements/visual.txt b/models/EmbodiedScan/requirements/visual.txt
similarity index 100%
rename from requirements/visual.txt
rename to models/EmbodiedScan/requirements/visual.txt
diff --git a/models/EmbodiedScan/setup.py b/models/EmbodiedScan/setup.py
new file mode 100644
index 0000000..e5727ba
--- /dev/null
+++ b/models/EmbodiedScan/setup.py
@@ -0,0 +1,110 @@
+from setuptools import find_packages, setup
+
+
+def parse_requirements(fname='requirements.txt', with_version=True):
+    """Parse the package dependencies listed in a requirements file but strip
+    specific versioning information.
+
+    Args:
+        fname (str): path to requirements file
+        with_version (bool, default=True): if True include version specs
+
+    Returns:
+        list[str]: list of requirements items
+
+    CommandLine:
+        python -c "import setup; print(setup.parse_requirements())"
+    """
+    import re
+    import sys
+    from os.path import exists
+    require_fpath = fname
+
+    def parse_line(line):
+        """Parse information from a line in a requirements text file."""
+        if line.startswith('-r '):
+            # Allow specifying requirements in other files
+            target = line.split(' ')[1]
+            for info in parse_require_file(target):
+                yield info
+        else:
+            info = {'line': line}
+            if line.startswith('-e '):
+                info['package'] = line.split('#egg=')[1]
+            else:
+                # Remove versioning from the package
+                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
+                parts = re.split(pat, line, maxsplit=1)
+                parts = [p.strip() for p in parts]
+
+                info['package'] = parts[0]
+                if len(parts) > 1:
+                    op, rest = parts[1:]
+                    if ';' in rest:
+                        # Handle platform specific dependencies
+                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
+                        version, platform_deps = map(str.strip,
+                                                     rest.split(';'))
+                        info['platform_deps'] = platform_deps
+                    else:
+                        version = rest  # NOQA
+                    info['version'] = (op, version)
+            yield info
+
+    def parse_require_file(fpath):
+        with open(fpath, 'r') as f:
+            for line in f.readlines():
+                line = line.strip()
+                if line and not line.startswith('#'):
+                    for info in parse_line(line):
+                        yield info
+
+    def gen_packages_items():
+        if exists(require_fpath):
+            for info in parse_require_file(require_fpath):
+                parts = [info['package']]
+                if with_version and 'version' in info:
+                    parts.extend(info['version'])
+                if not sys.version.startswith('3.4'):
+                    # apparently package_deps are broken in 3.4
+                    platform_deps = info.get('platform_deps')
+                    if platform_deps is not None:
+                        parts.append(';' + platform_deps)
+                item = ''.join(parts)
+                yield item
+
+    packages = list(gen_packages_items())
+    return packages
+
+
+if __name__ == '__main__':
+    setup(
+        name='embodiedscan',
+        version='0.1',
+        description='EmbodiedScan',
+        # long_description=readme(),
+        # long_description_content_type='text/markdown',
+        author='author',  # TODO
+        author_email='email',  # TODO
+        keywords='computer vision, 3D object detection',
+        url='https://github.com/open-mmlab/mmdetection3d',
+        packages=find_packages(exclude=('configs', 'tools', 'demo')),
+        include_package_data=True,
+        classifiers=[
+            'Development Status :: 3 - Alpha',
+            'License :: OSI Approved :: Apache Software License',
+            'Operating System :: OS Independent',
+            'Programming Language :: Python :: 3',
+            'Programming Language :: Python :: 3.7',
+            'Programming Language :: Python :: 3.8',
+            'Programming Language :: Python :: 3.9',
+        ],
+        python_requires='>=3.7',
+        license='Apache License
2.0', + install_requires=parse_requirements('requirements/base.txt'), + extras_require={ + 'visual': parse_requirements('requirements/visual.txt'), + }, + ext_modules=[], + # cmdclass={'build_ext': BuildExtension}, + zip_safe=False) diff --git a/models/EmbodiedScan/test.sh b/models/EmbodiedScan/test.sh new file mode 100644 index 0000000..d150477 --- /dev/null +++ b/models/EmbodiedScan/test.sh @@ -0,0 +1,7 @@ +python tools/test.py configs/grounding/pcd_vg_1030.py /mnt/petrelfs/linjingli/tmp/code/MMScan-code/VG/benchmark/EmbodiedScan/exps/MMScan-VG-1030/epoch_12.pth --work-dir exps/MMScan-VG-1030 --launcher="slurm" +# GPUS=4 +# CONFIG=configs/grounding/pcd_vg_1030.py +# WORK_DIR=exps/MMScan-VG-1030 +# PORT=`expr $RANDOM % 4000 + 25000` + +# python -u tools/train.py ${CONFIG} --work-dir=${WORK_DIR} --launcher="slurm" --cfg-options env_cfg.dist_cfg.port=${PORT} diff --git a/tools/eval_script.py b/models/EmbodiedScan/tools/eval_script.py similarity index 99% rename from tools/eval_script.py rename to models/EmbodiedScan/tools/eval_script.py index f95f30e..1d417a8 100644 --- a/tools/eval_script.py +++ b/models/EmbodiedScan/tools/eval_script.py @@ -2,11 +2,10 @@ import argparse import mmengine +from embodiedscan.structures import EulerDepthInstance3DBoxes from mmengine.logging import print_log from terminaltables import AsciiTable -from embodiedscan.structures import EulerDepthInstance3DBoxes - def parse_args(): parser = argparse.ArgumentParser( diff --git a/tools/eval_script_portable.py b/models/EmbodiedScan/tools/eval_script_portable.py similarity index 100% rename from tools/eval_script_portable.py rename to models/EmbodiedScan/tools/eval_script_portable.py diff --git a/tools/mv-grounding.sh b/models/EmbodiedScan/tools/mv-grounding.sh similarity index 90% rename from tools/mv-grounding.sh rename to models/EmbodiedScan/tools/mv-grounding.sh index 70241a5..e1bd14a 100644 --- a/tools/mv-grounding.sh +++ b/models/EmbodiedScan/tools/mv-grounding.sh @@ -2,8 +2,8 @@ set -x -CKPT_PATH=/mnt/petrelfs/wangtai/EmbodiedScan/work_dirs -PARTITION=test +CKPT_PATH=/mnt/petrelfs/lvruiyuan/repos/EmbodiedScan/work_dirs +PARTITION=mozi-S1 JOB_NAME=mv-grounding-challenge-benchmark TASK=mv-grounding-challenge-benchmark CONFIG=configs/grounding/mv-grounding_8xb12_embodiedscan-vg-9dof.py diff --git a/tools/submit_results.py b/models/EmbodiedScan/tools/submit_results.py similarity index 100% rename from tools/submit_results.py rename to models/EmbodiedScan/tools/submit_results.py diff --git a/tools/test.py b/models/EmbodiedScan/tools/test.py similarity index 98% rename from tools/test.py rename to models/EmbodiedScan/tools/test.py index 1d2df60..5742e10 100644 --- a/tools/test.py +++ b/models/EmbodiedScan/tools/test.py @@ -58,7 +58,7 @@ def parse_args(): 'is allowed.') parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm', 'mpi'], - default='none', + default='slurm', help='job launcher') parser.add_argument('--tta', action='store_true', @@ -103,6 +103,7 @@ def trigger_visualization_hook(cfg, args): def main(): + os.environ['RANK'] = str(0) args = parse_args() # load config diff --git a/tools/train.py b/models/EmbodiedScan/tools/train.py similarity index 100% rename from tools/train.py rename to models/EmbodiedScan/tools/train.py diff --git a/models/EmbodiedScan/train.sh b/models/EmbodiedScan/train.sh new file mode 100644 index 0000000..3666025 --- /dev/null +++ b/models/EmbodiedScan/train.sh @@ -0,0 +1 @@ +python -m torch.distributed.launch --nproc_per_node=4 tools/train.py 
configs/grounding/pcd_vg_1030.py --work-dir exps/MMScan-VG-1030 --launcher="pytorch"
diff --git a/models/LEO/LICENSE b/models/LEO/LICENSE
new file mode 100644
index 0000000..83bea46
--- /dev/null
+++ b/models/LEO/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2023 embodied-generalist
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/models/LEO/common/io_utils.py b/models/LEO/common/io_utils.py
new file mode 100755
index 0000000..4bffe3e
--- /dev/null
+++ b/models/LEO/common/io_utils.py
@@ -0,0 +1,106 @@
+import csv
+import json
+import pickle
+from pathlib import Path
+
+import cv2
+import numpy as np
+import torch
+import yaml
+from omegaconf import OmegaConf
+
+
+def make_dir(dir_path):
+    if not Path(dir_path).exists():
+        Path(dir_path).mkdir(parents=True, exist_ok=True)
+
+
+def load_imgs(img_paths, option=cv2.IMREAD_COLOR):
+    imgs = [cv2.imread(img_path, option) for img_path in img_paths]
+    return imgs
+
+
+def load_pickle(filename):
+    with Path(filename).open('rb') as f:
+        return pickle.load(f)
+
+
+def save_pickle(data, filename):
+    with Path(filename).open('wb') as f:
+        pickle.dump(data, f, protocol=pickle.HIGHEST_PROTOCOL)
+
+
+def load_json(filename):
+    with Path(filename).open('rb') as f:
+        return json.load(f)
+
+
+def save_json(data, filename, save_pretty=True, sort_keys=False):
+    with Path(filename).open('w') as f:
+        if save_pretty:
+            f.write(json.dumps(data, indent=4, sort_keys=sort_keys))
+        else:
+            json.dump(data, f)
+
+
+def load_jsonl(filename):
+    with Path(filename).open('r') as f:
+        return [json.loads(l.strip('\n')) for l in f.readlines()]
+
+
+def save_jsonl(data, filename):
+    with Path(filename).open('w') as f:
+        f.write('\n'.join([json.dumps(e) for e in data]))
+
+
+def load_yaml(filename):
+    with Path(filename).open('r') as f:
+        return yaml.load(f, Loader=yaml.SafeLoader)
+
+
+def save_yaml(data, filename):
+    with Path(filename).open('w') as f:
+        json.dump(OmegaConf.to_container(data, resolve=True), f, indent=2)
+
+
+def load_csv(filename, delimiter=','):
+    idx2key = None
+    contents = {}
+    with Path(filename).open('r') as f:
+        reader = csv.reader(f, delimiter=delimiter)
+        # csv.reader yields rows, so enumerate to get the line index as well
+        for l_idx, row in enumerate(reader):
+            if l_idx == 0:
+                idx2key = row
+                for k_idx, key in enumerate(idx2key):
+                    contents[key] = []
+            else:
+                for c_idx, col in enumerate(row):
+                    contents[idx2key[c_idx]].append(col)
+    return contents, idx2key
+
+
+def save_csv(data, filename, cols=None, delimiter=','):
+    with Path(filename).open('w') as f:
+        writer = csv.writer(f, delimiter=delimiter)
+        num_entries = len(data[list(data.keys())[0]])
+        assert cols is not None, 'Must have column names for dumping csv files.'
+        writer.writerow(cols)
+        for l_idx in range(num_entries):
+            row = [data[key][l_idx] for key in cols]
+            writer.writerow(row)
+
+
+def load_numpy(filename):
+    return np.load(filename, allow_pickle=True)
+
+
+def save_numpy(data, filename):
+    np.save(filename, data, allow_pickle=True)
+
+
+def load_tensor(filename):
+    return torch.load(filename)
+
+
+def save_tensor(data, filename):
+    torch.save(data, filename)
diff --git a/models/LEO/common/launch_utils.py b/models/LEO/common/launch_utils.py
new file mode 100755
index 0000000..10ef564
--- /dev/null
+++ b/models/LEO/common/launch_utils.py
@@ -0,0 +1,145 @@
+import os
+import socket
+import subprocess
+from pathlib import Path
+
+import submitit
+
+
+def get_available_port():
+    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+        s.bind(('localhost', 0))
+        # getsockname returns the address and port the socket is bound to
+        port = s.getsockname()[1]
+    return port
+
+
+def is_port_available(port, host='localhost'):
+    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
+        try:
+            s.bind((host, port))
+            return True
+        except socket.error:
+            return False
+
+
+class SubmititLauncher:
+
+    def __init__(self, args):
+        self.args = args
+
+    def __call__(self):
+        host_name = os.popen('scontrol show hostnames $SLURM_JOB_NODELIST'
+                             ).read().split('\n')[0]
+        self._set_gpu_args()
+        if not self.args.port or not is_port_available(self.args.port,
+                                                       host_name):
+            port = get_available_port()
+            print('Use system assigned port for distributed training: ', port)
+        else:
+            port = self.args.port
+        # Using Accelerate for launching
+        multi_gpu = '--multi_gpu' if self.args.num_nodes * self.args.gpu_per_node > 1 else ''
+        opts = ' '.join(self.args.opts) if len(self.args.opts) > 0 else ''
+        opts += f' num_gpu={self.args.num_nodes * self.args.gpu_per_node} '
+        full_cfg_path = Path(self.args.config)
+        cfg_path, cfg_file = str(full_cfg_path.parent), str(full_cfg_path.name)
+        cmd = f'accelerate launch \
+            --num_machines {self.args.num_nodes} \
+            --mixed_precision {self.args.mixed_precision} {multi_gpu} \
+            --num_processes {self.args.gpu_per_node * self.args.num_nodes} \
+            --num_cpu_threads_per_process {self.args.cpu_per_task} \
+            --main_process_ip {host_name} \
+            --main_process_port {port} \
+            --machine_rank {self.args.node_id} \
+            --dynamo_backend no \
+            {self.args.run_file} \
+            --config-path {cfg_path} \
+            --config-name {cfg_file} \
+            num_gpu={self.args.num_nodes * self.args.gpu_per_node} \
+            hydra.run.dir=. \
+            hydra.output_subdir=null \
+            ~hydra.job_logging.handlers.file \
+            hydra.job_logging.root.handlers=[console] \
+            hydra/hydra_logging=disabled {opts}'
+
+        subprocess.run(cmd, shell=True)
+
+    def _set_gpu_args(self):
+        job_env = submitit.JobEnvironment()
+        self.args.job_dir = str(self.args.job_dir).replace(
+            '%j', job_env.job_id)
+        self.args.node_id = int(job_env.global_rank / self.args.gpu_per_node)
+
+
+def submitit_launch(args):
+    """Multi node script launching with Submitit."""
+    additional_parameters = {}
+    if args.nodelist != '':
+        # if specifying node id
+        nodelist = f'{str(args.nodelist)}'
+        additional_parameters['nodelist'] = nodelist
+
+    executor = submitit.AutoExecutor(folder=args.job_dir,
+                                     slurm_max_num_timeout=30)
+    executor.update_parameters(
+        name=args.name,
+        mem_gb=args.mem_per_gpu * args.gpu_per_node * args.num_nodes,
+        gpus_per_node=args.gpu_per_node,
+        tasks_per_node=1,
+        cpus_per_task=args.gpu_per_node * args.cpu_per_task,
+        nodes=args.num_nodes,
+        slurm_qos=args.qos,
+        slurm_partition=args.partition,
+        slurm_account=args.account,
+        slurm_time=args.time * 60,
+        slurm_signal_delay_s=120,
+        slurm_additional_parameters=additional_parameters)
+    launcher = SubmititLauncher(args)
+    job = executor.submit(launcher)
+    print(f'submitted job: {job.job_id}')
+
+
+def accelerate_launch(args):
+    """Single node script launching with Accelerate."""
+    opts = ' '.join(args.opts) if len(args.opts) > 0 else ''
+    opts += f' num_gpu={args.num_nodes * args.gpu_per_node} '
+    multi_gpu = '--multi_gpu' if args.num_nodes * args.gpu_per_node > 1 else ''
+    full_cfg_path = Path(args.config)
+    cfg_path, cfg_file = str(full_cfg_path.parent), str(full_cfg_path.name)
+    cmd = f'accelerate launch \
+        --num_machines {args.num_nodes} \
+        {multi_gpu} \
+        --mixed_precision {args.mixed_precision} \
+        --num_processes {args.gpu_per_node * args.num_nodes} \
+        --num_cpu_threads_per_process {args.cpu_per_task} \
+        --dynamo_backend no \
+        {args.run_file} \
+        --config-path {cfg_path} \
+        --config-name {cfg_file} \
+        num_gpu={args.num_nodes * args.gpu_per_node} \
+        hydra.run.dir=. \
+        hydra.output_subdir=null \
+        ~hydra.job_logging.handlers.file \
+        hydra.job_logging.root.handlers=[console] \
+        hydra/hydra_logging=disabled {opts}'
+
+    subprocess.run(cmd, shell=True)
+
+
+def python_launch(args):
+    """Vanilla python launcher for debugging purposes."""
+    opts = ' '.join(args.opts) if len(args.opts) > 0 else ''
+    full_cfg_path = Path(args.config)
+    cfg_path, cfg_file = str(full_cfg_path.parent), str(full_cfg_path.name)
+    cmd = f'python {args.run_file} \
+        --config-path {cfg_path} \
+        --config-name {cfg_file} \
+        num_gpu=1 \
+        hydra.run.dir=.
\ + hydra.output_subdir=null \ + ~hydra.job_logging.handlers.file \ + hydra.job_logging.root.handlers=[console] \ + hydra/hydra_logging=disabled {opts}' + + subprocess.run(cmd, shell=True) diff --git a/models/LEO/common/misc.py b/models/LEO/common/misc.py new file mode 100755 index 0000000..cda1557 --- /dev/null +++ b/models/LEO/common/misc.py @@ -0,0 +1,279 @@ +import collections +import functools +import re +from typing import Any + +import torch +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.scheduler import AcceleratedScheduler +from accelerate.state import PartialState +from accelerate.utils import DistributedType, recursively_apply +from accelerate.utils.constants import TORCH_DISTRIBUTED_OPERATION_TYPES +from torch._six import string_classes +from torch.utils.data import random_split + +try: + from torch.optim.lr_scheduler import LRScheduler +except ImportError: + from torch.optim.lr_scheduler import _LRScheduler as LRScheduler + +logger = get_logger(__name__) + + +def rsetattr(obj, attr, val): + pre, _, post = attr.rpartition('.') + return setattr(rgetattr(obj, pre) if pre else obj, post, val) + + +# using wonder's beautiful simplification: https://stackoverflow.com/questions/31174295/getattr-and-setattr-on-nested-objects/31174427?noredirect=1#comment86638618_31174427 + + +def rgetattr(obj, attr, *args): + + def _getattr(obj, attr): + return getattr(obj, attr, *args) + + return functools.reduce(_getattr, [obj] + attr.split('.')) + + +def default_collate(batch): + r""" + Modify torch.utils.data.default_collate to support collating variable-length lists + """ + np_str_obj_array_pattern = re.compile(r'[SaUO]') + default_collate_err_msg_format = ( + 'default_collate: batch must contain tensors, numpy arrays, numbers, ' + 'dicts or lists; found {}') + + elem = batch[0] + elem_type = type(elem) + if isinstance(elem, torch.Tensor): + out = None + if torch.utils.data.get_worker_info() is not None: + # If we're in a background process, concatenate directly into a + # shared memory tensor to avoid an extra copy + numel = sum(x.numel() for x in batch) + storage = elem.storage()._new_shared(numel, device=elem.device) + out = elem.new(storage).resize_(len(batch), *list(elem.size())) + return torch.stack(batch, 0, out=out) + elif elem_type.__module__ == 'numpy' and elem_type.__name__ != 'str_' \ + and elem_type.__name__ != 'string_': + if elem_type.__name__ == 'ndarray' or elem_type.__name__ == 'memmap': + # array of string classes and object + if np_str_obj_array_pattern.search(elem.dtype.str) is not None: + raise TypeError( + default_collate_err_msg_format.format(elem.dtype)) + + return default_collate([torch.as_tensor(b) for b in batch]) + elif elem.shape == (): # scalars + return torch.as_tensor(batch) + elif isinstance(elem, float): + return torch.tensor(batch, dtype=torch.float64) + elif isinstance(elem, int): + return torch.tensor(batch) + elif isinstance(elem, string_classes): + return batch + elif isinstance(elem, collections.abc.Mapping): + try: + return elem_type({ + key: default_collate([d[key] for d in batch]) + for key in elem + }) + except TypeError: + # The mapping type may not support `__init__(iterable)`. 
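+            # fall back to a plain dict with the same collated values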
+ return { + key: default_collate([d[key] for d in batch]) + for key in elem + } + elif isinstance(elem, tuple) and hasattr(elem, '_fields'): # namedtuple + return elem_type(*(default_collate(samples) + for samples in zip(*batch))) + elif isinstance(elem, collections.abc.Sequence): + """custom part: directly return for lists and tuples.""" + return batch + + raise TypeError(default_collate_err_msg_format.format(elem_type)) + + +def split_train_set(train_set, epochs): + train_subset_base = len(train_set) // epochs + train_subset_res = len(train_set) % epochs + return random_split(train_set, + [train_subset_base + 1] * (train_subset_res) + + [train_subset_base] * (epochs - train_subset_res)) + + +# Customize operations for gathering +def _gpu_gather_object(object: Any): + # by JY Huang: re-implement the method for gathering non-tensor objects + output_objects = [None for _ in range(PartialState().num_processes)] + torch.distributed.all_gather_object(output_objects, object) + if isinstance(object, (list, tuple)): + output_list = [] + for item in output_objects: + output_list.extend(item) + return output_list + elif isinstance(object, dict): + template = output_objects[0] + output_dict = {} + for k, v in template.items(): + if v is None or not hasattr(v, '__iter__'): + output_dict[k] = v + continue + output_dict[k] = [] + for item in output_objects: + output_dict[k].extend(item[k]) + return output_dict + + +def gather_object(object: Any): + """Recursively gather object in a nested list/tuple/dictionary of objects + from all devices. + + Args: + object (nested list/tuple/dictionary of picklable object): + The data to gather. + + Returns: + The same data structure as `object` with all the objects sent to every device. + """ + if PartialState().distributed_type == DistributedType.TPU: + raise NotImplementedError('gather objects in TPU is not supported') + elif PartialState().distributed_type in TORCH_DISTRIBUTED_OPERATION_TYPES: + return _gpu_gather_object(object) + else: + return object + + +""" +Customize Accelerator to support: + 1. advanced gather_for_metrics + 2. only saving partial model weights when calling save_state +""" + + +class CustomAccelerator(Accelerator): + + def gather_for_metrics(self, input_data): + # by JY Huang: re-implement this method for gathering non-tensor objects + try: + recursively_apply(lambda x: x, + input_data, + error_on_other_type=True) + all_tensors = True + except TypeError: + all_tensors = False + + if not all_tensors: + """custom part 1.""" + data = gather_object(input_data) + """ custom part 1 """ + else: + data = self.gather(input_data) + + try: + if self.gradient_state.end_of_dataloader: + # at the end of a dataloader, `gather_for_metrics` regresses to + # `gather` unless the dataset has a remainder so log. + if self.gradient_state.remainder == -1: + logger.info( + 'The used dataset had no length, returning gathered tensors. You should drop the remainder yourself.' + ) + return data + elif self.gradient_state.remainder > 0: + """custom part 2.""" + + # Last batch needs to be truncated on distributed systems as it contains additional samples + def _adjust_samples(tensor): + return tensor[:self.gradient_state. 
+ remainder] if tensor is not None else None + + if all_tensors: + # This only applies to tensors, as defined in `recursively_apply` + return recursively_apply(_adjust_samples, data) + else: + if isinstance(data, (list, tuple)): + return _adjust_samples(data) + elif isinstance(data, dict): + return { + k: _adjust_samples(v) + for k, v in data.items() + } + else: + raise NotImplementedError( + f'Non-tensor gather only supports list, tuple or dict' + ) + """ custom part 2 """ + else: # remainder is 0 + # no remainder even though at end of dataloader, so nothing to do. + return data + else: + # Not at the end of the dataloader, no need to adjust the tensors + return data + except Exception: + # Dataset had no length or raised an error + return data + + def get_state_dict(self, model, unwrap=True): + # only save learnable parameters + if self.distributed_type == DistributedType.DEEPSPEED: + if self.deepspeed_config['zero_optimization']['stage'] == 3: + if model.zero_gather_16bit_weights_on_model_save(): + state_dict = model._zero3_consolidated_16bit_state_dict() + else: + raise ValueError( + 'Cannot get 16bit model weights because `stage3_gather_16bit_weights_on_model_save` in DeepSpeed config is False. ' + 'To save the model weights in 16bit, set `stage3_gather_16bit_weights_on_model_save` to True in DeepSpeed config file or ' + 'set `zero3_save_16bit_model` to True when using `accelerate config`. ' + 'To save the full checkpoint, run `model.save_checkpoint(save_dir)` and use `zero_to_fp32.py` to recover weights.' + ) + else: + from deepspeed.checkpoint.utils import \ + clone_tensors_for_torch_save + + state_dict = clone_tensors_for_torch_save( + self.unwrap_model(model).state_dict()) + elif self.distributed_type == DistributedType.FSDP: + from torch.distributed.fsdp import FullStateDictConfig + from torch.distributed.fsdp import FullyShardedDataParallel as FSDP + from torch.distributed.fsdp import StateDictType + + full_state_dict_config = FullStateDictConfig(offload_to_cpu=True, + rank0_only=True) + with FSDP.state_dict_type(model, StateDictType.FULL_STATE_DICT, + full_state_dict_config): + state_dict = model.state_dict() + else: + if unwrap: + model = self.unwrap_model(model) + state_dict = model.state_dict() + """ custom part """ + keys_list = list(state_dict.keys()) + for k in keys_list: + if k not in self.learn_params_list: # need to assign `learn_params_list` before calling this method + del state_dict[k] + """ custom part """ + + return state_dict + + def prepare_scheduler(self, scheduler: LRScheduler): + # Ensure we can't double wrap a scheduler due to `find_batch_size` + if getattr(scheduler, '_is_accelerate_prepared', False): + if scheduler not in self._schedulers: + self._schedulers.append(scheduler) + return scheduler + # We try to find the optimizer associated with `scheduler`, the default is the full list. 
+        optimizer = self._optimizers
+        for opt in self._optimizers:
+            if getattr(scheduler, 'optimizer', None) == opt.optimizer:
+                optimizer = opt
+                break
+        scheduler = AcceleratedScheduler(
+            scheduler,
+            optimizer,
+            step_with_optimizer=self.step_scheduler_with_optimizer,
+            split_batches=True,  # custom, for proper scheduler.step()
+        )
+        self._schedulers.append(scheduler)
+        return scheduler
diff --git a/models/LEO/common/type_utils.py b/models/LEO/common/type_utils.py
new file mode 100755
index 0000000..14bc9c5
--- /dev/null
+++ b/models/LEO/common/type_utils.py
@@ -0,0 +1,32 @@
+import torch
+from omegaconf import OmegaConf
+
+
+def cfg2dict(cfg):
+    return OmegaConf.to_container(cfg, resolve=True)
+
+
+def _to_device(state, device):
+    """Usually the state is loaded from a CPU checkpoint but needs to move to CUDA."""
+    if isinstance(state, torch.Tensor):
+        new_state = state.to(
+            device,
+            non_blocking=True)  # assume the device is properly set by torch.cuda.set_device
+    elif isinstance(state, list):
+        new_state = torch.tensor([_to_device(t, device)
+                                  for t in state]).to(device)
+    elif isinstance(state, tuple):
+        new_state = torch.tensor(tuple(_to_device(t, device)
+                                       for t in state)).to(device)
+    elif isinstance(state, dict):
+        new_state = {n: _to_device(t, device) for n, t in state.items()}
+    else:
+        try:
+            if not isinstance(state, str):
+                new_state = torch.tensor(state).to(device)
+            else:
+                new_state = state
+        except Exception:
+            raise ValueError(
+                f'The provided tensor can not be transferred to {device}')
+    return new_state
diff --git a/models/LEO/configs/data/default.yaml b/models/LEO/configs/data/default.yaml
new file mode 100644
index 0000000..c2507f3
--- /dev/null
+++ b/models/LEO/configs/data/default.yaml
@@ -0,0 +1,104 @@
+scan_family_base: scandata #/leo-data/scannet
+rscan_base: scandata/leo-data/3RScan-base
+
+alignment_base: scandata/leo-data/annotations/alignment
+instruction_base: scandata/leo-data/annotations/instruction
+
+embodied_scan_anno_base: ../../data/mmscan_anno
+
+embodied_scan_info_base: ../../data/mmscan_info
+
+max_obj_len: ${dataset_wrapper_args.max_obj_len}
+num_points: 1024
+img_size: [224, 224]
+
+
+# cap3d:
+#   cap3d_root: ${data.alignment_base}/obj_caption
+#   num_points: ${data.num_points}
+
+# obj_scene_cap:
+#   rscan_base: ${data.rscan_base}
+#   scannet_base: ${data.scan_family_base}
+#   anno_dir: ${data.alignment_base}/obj_scene_caption
+#   max_obj_len: ${data.max_obj_len}
+#   num_points: ${data.num_points}
+
+# scene_cap:
+#   rscan_base: ${data.rscan_base}
+#   anno_dir: ${data.alignment_base}/scene_caption
+#   max_obj_len: ${data.max_obj_len}
+#   num_points: ${data.num_points}
+
+# scan2cap:
+#   scannet_base: ${data.scan_family_base}
+#   anno_dir: ${data.instruction_base}/scan2cap
+#   max_obj_len: ${data.max_obj_len}
+#   num_points: ${data.num_points}
+#   pc_type: gt # overrided from CLI
+#   iou_thres: 0.5
+#   corpus: ${data.instruction_base}/scan2cap/scanrefer_corpus.json
+
+# scanqa:
+#   scannet_base: ${data.scan_family_base}
+#   anno_dir: ${data.instruction_base}/scanqa
+#   max_obj_len: ${data.max_obj_len}
+#   num_points: ${data.num_points}
+#   pc_type: gt # overrided from CLI
+
+# sqa3d:
+#   scannet_base: ${data.scan_family_base}
+#   anno_dir: ${data.instruction_base}/sqa3d
+#   max_obj_len: ${data.max_obj_len}
+#   num_points: ${data.num_points}
+#   pc_type: gt # overrided from CLI
+
+# rscan_qa:
+#   rscan_base: ${data.rscan_base}
+#   anno_dir: ${data.instruction_base}/3rscanqa
+#   max_obj_len: ${data.max_obj_len}
+#   num_points: ${data.num_points}
+
+# rscan_plan:
+#   rscan_base: ${data.rscan_base}
+#   anno_dir:
${data.instruction_base}/planning +# max_obj_len: ${data.max_obj_len} +# num_points: ${data.num_points} + +# rscan_dialog: +# rscan_base: ${data.rscan_base} +# anno_dir: ${data.instruction_base}/dialogue +# max_obj_len: ${data.max_obj_len} +# num_points: ${data.num_points} + +# mp3d_objnav: +# base_dir: ${data.instruction_base}/mp3d_objnav +# max_obj_len: ${data.max_obj_len} +# num_points: ${data.num_points} +# max_traj_len: 500 +# history_length: 4 +# num_pred: 1 +# img_size: ${data.img_size} +# scene_object_deterministic: True # if so, the same objects will be sampled for a scene + +# cliport: +# base_dir: ${data.instruction_base}/cliport +# max_obj_len: ${data.max_obj_len} +# num_points: ${data.num_points} +# history_length: 4 +# img_size: ${data.img_size} + +embodied_scan_l: + anno_dir: ${data.embodied_scan_anno_base}/QA_jinli # path to mmscan qa folder + max_obj_len: ${data.max_obj_len} + num_points: ${data.num_points} + test_scene_ids: ${data.embodied_scan_info_base}/test_scene_ids.txt + pc_type: gt # overrided from CLI + +embodied_scan_c: + anno_dir: + - "${data.embodied_scan_anno_base}/object_caption.json" + - "${data.embodied_scan_anno_base}/region_caption.json" + max_obj_len: ${data.max_obj_len} + num_points: ${data.num_points} + pc_type: gt # overrided from CLI diff --git a/models/LEO/configs/default_train.yaml b/models/LEO/configs/default_train.yaml new file mode 100644 index 0000000..4b504f6 --- /dev/null +++ b/models/LEO/configs/default_train.yaml @@ -0,0 +1,67 @@ +defaults: + - data: default + - task: tuning_noact + - llm: vicuna7b + - vision2d: convnext + - vision3d: ose3d_pointnetpp + - _self_ + +# exp general info +name: debug_box # project name of wandb +note: default # run name of wandb + +rng_seed: 42 +num_gpu: 4 # will be overrided by launch.py +mode: train +naming_keywords: [note] # choose keywords to feature the exp run dir +base_dir: ./logs +exp_dir: "" # temporarily empty, will be set by run.py as base_dir + name + *naming_keywords +pretrained_ckpt_path: "" # specified on launch + +logger: + name: wandb + entity: TBD + +dataset_wrapper_args: + max_obj_len: 60 + +dataloader: + train: + batchsize: 4 # per-gpu batchsize + num_workers: 4 + eval: + batchsize: 4 # per-gpu batchsize + num_workers: 4 + +trainer: LeoTrainer +training: + epochs: ${task.training.epochs} + gradient_accumulation_steps: 5 + grad_norm: 5.0 + optim: + name: AdamW + args: + lr: ${task.training.lr} + betas: [0.9, 0.999] + weight_decay: 0.05 + schedule: + name: linear_warmup_cosine_decay + args: + warmup_steps: 400 + +eval: + num_batch_val: 50 + val_interval: 2 + +# model misc +clip_txt_guidance: + flag: False # for eai + clip_out_dim: 1024 + +# inference +probe: + sources: [3rscan] + scene_ids: [0cac75d0-8d6f-2d13-8c26-d771a31c3f50] + situations: "" + instructions: [Describe this scene.] 
diff --git a/models/LEO/configs/default_val.yaml b/models/LEO/configs/default_val.yaml
new file mode 100644
index 0000000..28a087e
--- /dev/null
+++ b/models/LEO/configs/default_val.yaml
@@ -0,0 +1,67 @@
+defaults:
+  - data: default
+  - task: tuning_noact
+  - llm: vicuna7b
+  - vision2d: convnext
+  - vision3d: ose3d_pointnetpp
+  - _self_
+
+# exp general info
+name: debug_box # project name of wandb
+note: default # run name of wandb
+
+rng_seed: 42
+num_gpu: 4 # will be overridden by launch.py
+mode: val
+naming_keywords: [note] # choose keywords to feature the exp run dir
+base_dir: ./logs
+exp_dir: "" # temporarily empty, will be set by run.py as base_dir + name + *naming_keywords
+pretrained_ckpt_path: "" # specified on launch
+
+logger:
+  name: wandb
+  entity: TBD
+
+dataset_wrapper_args:
+  max_obj_len: 60
+
+dataloader:
+  train:
+    batchsize: 4 # per-gpu batchsize
+    num_workers: 4
+  eval:
+    batchsize: 4 # per-gpu batchsize
+    num_workers: 4
+
+trainer: LeoTrainer
+training:
+  epochs: ${task.training.epochs}
+  gradient_accumulation_steps: 5
+  grad_norm: 5.0
+  optim:
+    name: AdamW
+    args:
+      lr: ${task.training.lr}
+      betas: [0.9, 0.999]
+      weight_decay: 0.05
+  schedule:
+    name: linear_warmup_cosine_decay
+    args:
+      warmup_steps: 400
+
+eval:
+  num_batch_val: 50
+  val_interval: 2
+
+# model misc
+clip_txt_guidance:
+  flag: False # for eai
+  clip_out_dim: 1024
+
+# inference
+probe:
+  sources: [3rscan]
+  scene_ids: [0cac75d0-8d6f-2d13-8c26-d771a31c3f50]
+  situations: ""
+  instructions: [Describe this scene.]
+  save_obj_tokens: True
diff --git a/models/LEO/configs/llm/opt1.3b.yaml b/models/LEO/configs/llm/opt1.3b.yaml
new file mode 100644
index 0000000..e0a44c9
--- /dev/null
+++ b/models/LEO/configs/llm/opt1.3b.yaml
@@ -0,0 +1,11 @@
+name: OPT1.3B
+cfg_path: TBD
+truncation_side: right
+max_context_len: 256
+max_out_len: 256
+lora:
+  flag: True
+  rank: 16
+  alpha: 16
+  target_modules: [q_proj, k_proj, v_proj, out_proj]
+  dropout: 0.0
diff --git a/models/LEO/configs/llm/vicuna13b.yaml b/models/LEO/configs/llm/vicuna13b.yaml
new file mode 100644
index 0000000..a53d427
--- /dev/null
+++ b/models/LEO/configs/llm/vicuna13b.yaml
@@ -0,0 +1,11 @@
+name: Vicuna13B
+cfg_path: TBD
+truncation_side: right
+max_context_len: 256
+max_out_len: 256
+lora:
+  flag: True
+  rank: 16
+  alpha: 16
+  target_modules: [q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj]
+  dropout: 0.0
diff --git a/models/LEO/configs/llm/vicuna7b.yaml b/models/LEO/configs/llm/vicuna7b.yaml
new file mode 100644
index 0000000..c378808
--- /dev/null
+++ b/models/LEO/configs/llm/vicuna7b.yaml
@@ -0,0 +1,11 @@
+name: Vicuna7B
+cfg_path: weights/vicuna-7b
+truncation_side: right
+max_context_len: 256
+max_out_len: 256
+lora:
+  flag: True
+  rank: 16
+  alpha: 16
+  target_modules: [q_proj, k_proj, v_proj, o_proj, gate_proj, up_proj, down_proj]
+  dropout: 0.0
diff --git a/models/LEO/configs/task/check.yaml b/models/LEO/configs/task/check.yaml
new file mode 100644
index 0000000..34c9431
--- /dev/null
+++ b/models/LEO/configs/task/check.yaml
@@ -0,0 +1,31 @@
+# train
+training:
+  lr: 3e-5
+  epochs: 10
+
+leomix:
+  mode: [train]
+  dataset: LeoMix
+  mix: [embodied_scan_t] # [scan2cap, scanqa, sqa3d, rscan_qa, rscan_plan, rscan_dialog]
+  ratio: 1.0
+  dataset_wrapper: LeoObjPadDatasetWrapper
+  dataset_wrapper_args: ${dataset_wrapper_args}
+  train_dataloader_args: ${dataloader.train}
+  eval_dataloader_args: ${dataloader.eval}
+
+
+# embodied_scan_c:
+#   mode: [val, test]
+#   dataset: LeoEmbodiedScanC
+#   dataset_wrapper:
LeoObjPadDatasetWrapper +# dataset_wrapper_args: ${dataset_wrapper_args} +# eval_dataloader_args: ${dataloader.eval} +# evaluator: ScanQAEvaluator + +embodied_scan_t: + mode: [val, test] + dataset: LeoEmbodiedScanTest + dataset_wrapper: LeoObjPadDatasetWrapper + dataset_wrapper_args: ${dataset_wrapper_args} + eval_dataloader_args: ${dataloader.eval} + evaluator: ScanQAEvaluator diff --git a/models/LEO/configs/task/tuning_noact.yaml b/models/LEO/configs/task/tuning_noact.yaml new file mode 100644 index 0000000..b2eabbf --- /dev/null +++ b/models/LEO/configs/task/tuning_noact.yaml @@ -0,0 +1,31 @@ +# train +training: + lr: 3e-5 + epochs: 1 + +leomix: + mode: [train] + dataset: LeoMix + mix: [embodied_scan_l] # [scan2cap, scanqa, sqa3d, rscan_qa, rscan_plan, rscan_dialog] + ratio: 1.0 + dataset_wrapper: LeoObjPadDatasetWrapper + dataset_wrapper_args: ${dataset_wrapper_args} + train_dataloader_args: ${dataloader.train} + eval_dataloader_args: ${dataloader.eval} + + +# embodied_scan_c: +# mode: [val, test] +# dataset: LeoEmbodiedScanC +# dataset_wrapper: LeoObjPadDatasetWrapper +# dataset_wrapper_args: ${dataset_wrapper_args} +# eval_dataloader_args: ${dataloader.eval} +# evaluator: ScanQAEvaluator + +embodied_scan_l: + mode: [val, test] + dataset: LeoEmbodiedScanL + dataset_wrapper: LeoObjPadDatasetWrapper + dataset_wrapper_args: ${dataset_wrapper_args} + eval_dataloader_args: ${dataloader.eval} + evaluator: MMScanEvaluator diff --git a/models/LEO/configs/vision2d/convnext.yaml b/models/LEO/configs/vision2d/convnext.yaml new file mode 100644 index 0000000..9db4ffa --- /dev/null +++ b/models/LEO/configs/vision2d/convnext.yaml @@ -0,0 +1,6 @@ +name: GridFeatureExtractor2D +backbone_name: convnext_base +backbone_pretrain_dataset: laion2b +use_pretrain: True +pooling: avg # null, avg, conv, attn +freeze: True diff --git a/models/LEO/configs/vision3d/backbone/pointbert.yaml b/models/LEO/configs/vision3d/backbone/pointbert.yaml new file mode 100644 index 0000000..a5ffe92 --- /dev/null +++ b/models/LEO/configs/vision3d/backbone/pointbert.yaml @@ -0,0 +1,14 @@ +net: + _target_: model.pointbert.pointbert.PointBERT + trans_dim: 384 + depth: 12 + drop_path_rate: 0.1 + cls_dim: 40 + num_heads: 6 + group_size: 16 # 32 + num_group: 128 # 512 + encoder_dims: 256 + add_RGB: True + +path: weights/pointbert_pointllm.pth +freeze: True diff --git a/models/LEO/configs/vision3d/backbone/pointnetpp.yaml b/models/LEO/configs/vision3d/backbone/pointnetpp.yaml new file mode 100644 index 0000000..4f93560 --- /dev/null +++ b/models/LEO/configs/vision3d/backbone/pointnetpp.yaml @@ -0,0 +1,9 @@ +net: + _target_: model.pointnetpp.pointnetpp.PointNetPP + sa_n_points: [32, 16, null] + sa_n_samples: [32, 32, null] + sa_radii: [0.2, 0.4, null] + sa_mlps: [[3, 64, 64, 128], [128, 128, 128, 256], [256, 256, 512, 768]] + +path: weights/pointbert_pointllm.pth +freeze: True diff --git a/models/LEO/configs/vision3d/backbone/pointnext.yaml b/models/LEO/configs/vision3d/backbone/pointnext.yaml new file mode 100644 index 0000000..f4d85d7 --- /dev/null +++ b/models/LEO/configs/vision3d/backbone/pointnext.yaml @@ -0,0 +1,23 @@ +net: + _target_: model.pointnext.pointnext.PointNext + in_channels: 3 + width: 32 + num_blocks: [1, 4, 7, 4, 4] + strides: [1, 4, 4, 4, 4] + block: InvResMLP + nsample: 32 + radius: 0.1 + conv_args: + order: conv-norm-act + aggr_args: + feature_type: dp_fj + reduction: max + group_args: + NAME: ballquery + radius: ${vision3d.backbone.net.radius} + nsample: ${vision3d.backbone.net.nsample} + sa_layers: 1 + 
sa_use_res: False + +path: TBD +freeze: True diff --git a/models/LEO/configs/vision3d/ose3d_pointnetpp.yaml b/models/LEO/configs/vision3d/ose3d_pointnetpp.yaml new file mode 100644 index 0000000..5b33182 --- /dev/null +++ b/models/LEO/configs/vision3d/ose3d_pointnetpp.yaml @@ -0,0 +1,22 @@ +defaults: + - backbone: pointnetpp + +name: OSE3D +hidden_dim: 256 +use_spatial_attn: True +use_embodied_token: True +fourier_size: 84 + +spatial_encoder: + dim_loc: 6 + num_attention_heads: 8 + dim_feedforward: 2048 + dropout: 0.1 + activation: gelu + spatial_multihead: True + spatial_dim: 5 # 1, 4, 5 + spatial_dist_norm: True + spatial_attn_fusion: cond # cond, mul, bias, ctx, add + num_layers: 3 + obj_loc_encoding: same_all # same_0, same_all, diff_all + pairwise_rel_type: center diff --git a/models/LEO/data/__init__.py b/models/LEO/data/__init__.py new file mode 100755 index 0000000..bd2cb74 --- /dev/null +++ b/models/LEO/data/__init__.py @@ -0,0 +1,2 @@ +from .dataset_wrapper import * +from .datasets import * diff --git a/models/LEO/data/build.py b/models/LEO/data/build.py new file mode 100755 index 0000000..4e6e2c0 --- /dev/null +++ b/models/LEO/data/build.py @@ -0,0 +1,32 @@ +from common.misc import default_collate +from fvcore.common.registry import Registry +from torch.utils.data import DataLoader + +DATASET_REGISTRY = Registry('Dataset') +DATASETWRAPPER_REGISTRY = Registry('DatasetWrapper') + + +def get_dataset_leo(cfg, split, dataset_name, dataset_wrapper_name, + dataset_wrapper_args): + # just get dataset directly and then wrap it + dataset = DATASET_REGISTRY.get(dataset_name)(cfg, split) + + if dataset_wrapper_name: + dataset = DATASETWRAPPER_REGISTRY.get(dataset_wrapper_name)( + dataset, dataset_wrapper_args) + + return dataset + + +def build_dataloader_leo(cfg, split, dataset_name, dataset_wrapper_name, + dataset_wrapper_args, dataloader_args): + dataset = get_dataset_leo(cfg, split, dataset_name, dataset_wrapper_name, + dataset_wrapper_args) + return DataLoader(dataset, + batch_size=dataloader_args.batchsize, + num_workers=dataloader_args.num_workers, + collate_fn=getattr(dataset, 'collate_fn', + default_collate), + pin_memory=True, + shuffle=True if split == 'train' else False, + drop_last=True if split == 'train' else False) diff --git a/models/LEO/data/data_utils.py b/models/LEO/data/data_utils.py new file mode 100755 index 0000000..2af8dcd --- /dev/null +++ b/models/LEO/data/data_utils.py @@ -0,0 +1,1398 @@ +import random +import re + +import cv2 +import numpy as np +# import open3d as o3d +import quaternion +import torch +import trimesh as tm +from scipy.spatial.transform import Rotation as R + +PIX_MEAN = (0.485, 0.456, 0.406) +PIX_STD = (0.229, 0.224, 0.225) + +# TODO(jxma): these are least used tokens (with largest token ID) of Vicuna +# {token: token_id} +VICUNA_ACTION_TOKENS = { + '给': 31999, + '弘': 31998, + '收': 31997, + '왕': 31996, + '黃': 31995, + '还': 31994, + '边': 31993, + 'べ': 31992, + 'げ': 31991, + 'ὀ': 31990, + '백': 31989, + '泰': 31988, + '역': 31987, + '联': 31986, + '怪': 31985, + '奇': 31984, + 'ɯ': 31983, + '番': 31982, + '止': 31981, + '합': 31980, + '才': 31979, + 'ფ': 31978, + '两': 31977, + '명': 31976, + '房': 31975, + '候': 31974, + '재': 31973, + '교': 31972, + '遠': 31971, + '計': 31970, + '故': 31969, + '丁': 31968, + 'ญ': 31967, + '음': 31966, + '進': 31965, + 'ษ': 31964, + '바': 31963, + '모': 31962, + '嘉': 31961, + '双': 31960, + '些': 31959, + 'ヨ': 31958, + 'ể': 31957, + 'ഞ': 31956, + '败': 31955, + '茶': 31954, + '회': 31953, + '洲': 31952, + '每': 31951, + '월': 31950, + '料': 
31949, + '梅': 31948, + '深': 31947, + 'ḏ': 31946, + '방': 31945, + '效': 31944, + '导': 31943, + 'Ē': 31942, + '중': 31941, + '내': 31940, + '舞': 31939, + 'ほ': 31938, + 'Ġ': 31937, + '1': 31936, + '微': 31935, + 'ន': 31934, + '瀬': 31933, + '唐': 31932, + '助': 31931, + '종': 31930, + 'ˇ': 31929, + '現': 31928, + 'थ': 31927, + '𝓝': 31926, + '타': 31925, + '居': 31924, + 'ᵉ': 31923, + 'သ': 31922, + 'ഷ': 31921, + 'ċ': 31920, + 'პ': 31919, + 'ව': 31918, + 'ම': 31917, + '删': 31916, + '客': 31915, + '兴': 31914, + 'ശ': 31913, + '昭': 31912, + '员': 31911, + '仮': 31910, + '̌': 31909, + '反': 31908, + 'ぐ': 31907, + '과': 31906, + '装': 31905, + '操': 31904, + '连': 31903, + '米': 31902, + '构': 31901, + '书': 31900, + '⥤': 31899, + '彦': 31898, + 'ḳ': 31897, + 'ྱ': 31896, + '식': 31895, + '运': 31894, + '种': 31893, + 'ҡ': 31892, + '̍': 31891, + 'ɵ': 31890, + 'ദ': 31889, + '项': 31888, + '貴': 31887, + '洞': 31886, + '巴': 31885, + 'ѫ': 31884, + '達': 31883, + '么': 31882, + '\\u202d': 31881, + 'ً': 31880, + '▓': 31879, + '˚': 31878, + '飛': 31877, + '頭': 31876, + '孝': 31875, + 'ự': 31874, + 'Έ': 31873, + 'Ÿ': 31872, + '論': 31871, + 'Ħ': 31870, + '红': 31869, + '庄': 31868, + '军': 31867, + 'ὺ': 31866, + 'ක': 31865, + 'ো': 31864, + '健': 31863, + '陈': 31862, + 'ರ': 31861, + 'ھ': 31860, + '速': 31859, + '渡': 31858, + 'ਿ': 31857, + '터': 31856, + '食': 31855, + '菜': 31854, + '池': 31853, + '话': 31852, + '测': 31851, + '溪': 31850, + 'ក': 31849, + '拳': 31848, + '雅': 31847, + '麻': 31846, + '鳥': 31845, + '越': 31844, + '甲': 31843, + 'ỳ': 31842, + '希': 31841, + '❯': 31840, + '望': 31839, + '非': 31838, + '∇': 31837, + '索': 31836, + '确': 31835, + 'む': 31834, + 'ந': 31833, + 'ϊ': 31832, + '塔': 31831, + '近': 31830, + '群': 31829, + 'ც': 31828, + 'Ξ': 31827, + '만': 31826, + '銀': 31825, + '斯': 31824, + '喜': 31823, + '학': 31822, + '़': 31821, + '鬼': 31820, + '样': 31819, + '丸': 31818, + '차': 31817, + 'զ': 31816, + '衛': 31815, + '尔': 31814, + '坂': 31813, + '話': 31812, + '看': 31811, + '复': 31810, + 'ற': 31809, + 'എ': 31808, + '్': 31807, + 'ӏ': 31806, + 'ŝ': 31805, + '들': 31804, + '右': 31803, + 'ḷ': 31802, + 'ြ': 31801, + 'ܝ': 31800, + 'Ě': 31799, + '达': 31798, + 'ữ': 31797, + 'ณ': 31796, + '编': 31795, + 'ˠ': 31794, + '˜': 31793, + '劉': 31792, + '判': 31791, + 'պ': 31790, + '개': 31789, + '隆': 31788, + '试': 31787, + '変': 31786, + '告': 31785, + '云': 31784, + 'Ţ': 31783, + 'ぶ': 31782, + '씨': 31781, + '座': 31780, + '➖': 31779, + 'ᾶ': 31778, + 'ѐ': 31777, + '।': 31776, + 'ပ': 31775, + '강': 31774, + '經': 31773, + 'ₗ': 31772, + '⊤': 31771, + '設': 31770, + 'Ἐ': 31769, + '击': 31768, + '串': 31767, + '∷': 31766, + '々': 31765, + 'ɫ': 31764, + '母': 31763, + '幸': 31762, + 'ず': 31761, + 'ף': 31760, + '朱': 31759, + '店': 31758, + '切': 31757, + '专': 31756, + 'ỹ': 31755, + '남': 31754, + '岩': 31753, + 'ṯ': 31752, + '该': 31751, + '雲': 31750, + '桥': 31749, + 'ķ': 31748, + '면': 31747, + '단': 31746, + '错': 31745, + '忠': 31744, + 'ʎ': 31743, + 'Ė': 31742, + '羅': 31741, + '沢': 31740, + '楽': 31739, + '✿': 31738, + '용': 31737, + '박': 31736, + '默': 31735, + '안': 31734, + '再': 31733, + 'आ': 31732, + '雪': 31731, + '富': 31730, + '业': 31729, + '陳': 31728, + '航': 31727, + 'Ἰ': 31726, + 'į': 31725, + '위': 31724, + 'ရ': 31723, + '足': 31722, + '勝': 31721, + 'շ': 31720, + '̈': 31719, + 'ゼ': 31718, + 'হ': 31717, + '무': 31716, + 'ள': 31715, + '樹': 31714, + '昌': 31713, + 'ා': 31712, + '結': 31711, + '草': 31710, + '竹': 31709, + 'ស': 31708, + '藏': 31707, + 'ふ': 31706, + 'ལ': 31705, + '活': 31704, + '守': 31703, + '址': 31702, + '秀': 31701, + '库': 31700, + '군': 31699, + '親': 31698, + '御': 31697, + '奈': 31696, 
+ '持': 31695, + '官': 31694, + 'ზ': 31693, + '連': 31692, + 'ਸ': 31691, + '⅓': 31690, + '付': 31689, + '首': 31688, + ' 身': 31687, + 'শ': 31686, + '称': 31685, + 'ね': 31684, + '断': 31683, + '赤': 31682, + '✅': 31681, + '현': 31680, + '电': 31679, + 'ै': 31678, + '̩': 31677, + '智': 31676, + '统': 31675, + '引': 31674, + 'ℂ': 31673, + 'Ḫ': 31672, + 'ץ': 31671, + 'ʑ': 31670, + '节': 31669, + 'ή': 31668, + 'ख': 31667, + '并': 31666, + 'গ': 31665, + '߬': 31664, + 'Ս': 31663, + 'ા': 31662, + '別': 31661, + '兵': 31660, + '恋': 31659, + '问': 31658, + '発': 31657, + '打': 31656, + '局': 31655, + '屋': 31654, + '若': 31653, + '漢': 31652, + '左': 31651, + '令': 31650, + '门': 31649, + '気': 31648, + '宝': 31647, + 'ൻ': 31646, + 'ợ': 31645, + 'ེ': 31644, + 'མ': 31643, + '紀': 31642, + '必': 31641, + '换': 31640, + '说': 31639, + 'ൽ': 31638, + '泉': 31637, + 'ර': 31636, + '育': 31635, + '': 31634, + '介': 31633, + '场': 31632, + '尾': 31631, + 'ẓ': 31630, + '函': 31629, + '⇔': 31628, + '戸': 31627, + '╣': 31626, + 'ൾ': 31625, + '管': 31624, + '್': 31623, + 'ご': 31622, + 'ゆ': 31621, + 'ụ': 31620, + '影': 31619, + '移': 31618, + '控': 31617, + '乐': 31616, + '技': 31615, + 'ན': 31614, + '态': 31613, + '宿': 31612, + '共': 31611, + '页': 31610, + 'න': 31609, + ';': 31608, + '그': 31607, + '関': 31606, + '素': 31605, + 'ਰ': 31604, + '호': 31603, + '葉': 31602, + 'ུ': 31601, + '省': 31600, + '展': 31599, + 'ἡ': 31598, + 'ˆ': 31597, + '题': 31596, + 'ী': 31595, + '从': 31594, + '汉': 31593, + '夢': 31592, + '⍵': 31591, + '按': 31590, + '▇': 31589, + '┃': 31588, + '車': 31587, + '∉': 31586, + 'ർ': 31585, + '头': 31584, + '-': 31583, + '민': 31582, + '聖': 31581, + '死': 31580, + '思': 31579, + '세': 31578, + '康': 31577, + '∆': 31576, + 'Մ': 31575, + '̱': 31574, + '输': 31573, + 'ے': 31572, + '년': 31571, + '因': 31570, + '秋': 31569, + '视': 31568, + 'រ': 31567, + '广': 31566, + '算': 31565, + '業': 31564, + '천': 31563, + '選': 31562, + '區': 31561, + 'တ': 31560, + '段': 31559, + '起': 31558, + '只': 31557, + 'ủ': 31556, + '\\x9d': 31555, + 'ց': 31554, + '黒': 31553, + '়': 31552, + '像': 31551, + '⊂': 31550, + '師': 31549, + '处': 31548, + 'ธ': 31547, + '隊': 31546, + '送': 31545, + 'ὑ': 31544, + '拉': 31543, + '显': 31542, + '支': 31541, + '機': 31540, + '球': 31539, + '添': 31538, + 'জ': 31537, + '진': 31536, + '万': 31535, + '洋': 31534, + '유': 31533, + '线': 31532, + '状': 31531, + '马': 31530, + '波': 31529, + 'ℚ': 31528, + '요': 31527, + '载': 31526, + '実': 31525, + 'ユ': 31524, + '‖': 31523, + '想': 31522, + 'Ď': 31521, + '服': 31520, + '報': 31519, + 'ǧ': 31518, + '를': 31517, + '然': 31516, + 'ⴰ': 31515, + 'ἱ': 31514, + 'ɹ': 31513, + '\\x99': 31512, + '☉': 31511, + '克': 31510, + '鉄': 31509, + 'Ṭ': 31508, + '例': 31507, + '老': 31506, + '语': 31505, + '張': 31504, + '宇': 31503, + '何': 31502, + 'ペ': 31501, + '̂': 31500, + 'ⁿ': 31499, + 'ိ': 31498, + 'ք': 31497, + '湖': 31496, + '景': 31495, + '🌍': 31494, + '드': 31493, + '∙': 31492, + '黄': 31491, + 'ǫ': 31490, + 'Ḩ': 31489, + 'հ': 31488, + '비': 31487, + '⊗': 31486, + 'ි': 31485, + '森': 31484, + '┈': 31483, + '今': 31482, + 'ய': 31481, + '超': 31480, + '写': 31479, + '【': 31478, + '⸮': 31477, + '沙': 31476, + '去': 31475, + '意': 31474, + '包': 31473, + '】': 31472, + '传': 31471, + 'ʋ': 31470, + 'ύ': 31469, + 'Ă': 31468, + '曲': 31467, + '计': 31466, + '∣': 31465, + '♀': 31464, + '序': 31463, + '变': 31462, + '密': 31461, + '◦': 31460, + 'န': 31459, + '산': 31458, + '여': 31457, + '帝': 31456, + '究': 31455, + '布': 31454, + '็': 31453, + 'ི': 31452, + '登': 31451, + '任': 31450, + '港': 31449, + 'ホ': 31448, + 'ड': 31447, + '岡': 31446, + '伝': 31445, + 'ḩ': 31444, + 'ղ': 31443, + 
'編': 31442, + '创': 31441, + '\\x91': 31440, + '认': 31439, + '術': 31438, + 'ध': 31437, + '及': 31436, + '해': 31435, + 'բ': 31434, + '站': 31433, + '角': 31432, + 'ĉ': 31431, + '阳': 31430, + '机': 31429, + 'ை': 31428, + '商': 31427, + 'Ά': 31426, + '七': 31425, + '现': 31424, + '没': 31423, + 'ื': 31422, + 'ܐ': 31421, + '造': 31420, + '比': 31419, + '⌘': 31418, + '마': 31417, + '崎': 31416, + '转': 31415, + 'ょ': 31414, + 'ू': 31413, + '经': 31412, + '會': 31411, + '记': 31410, + '株': 31409, + '조': 31408, + '被': 31407, + '문': 31406, + 'Ζ': 31405, + '開': 31404, + '则': 31403, + 'ォ': 31402, + 'ང': 31401, + '良': 31400, + '品': 31399, + '交': 31398, + 'ṅ': 31397, + 'ู': 31396, + '玉': 31395, + 'Ī': 31394, + '根': 31393, + '橋': 31392, + '或': 31391, + '夜': 31390, + '此': 31389, + 'へ': 31388, + 'դ': 31387, + 'প': 31386, + '電': 31385, + 'ச': 31384, + '需': 31383, + '模': 31382, + '们': 31381, + 'भ': 31380, + '\\u202c': 31379, + '경': 31378, + 'ण': 31377, + '求': 31376, + 'Ψ': 31375, + '章': 31374, + '友': 31373, + '╚': 31372, + 'က': 31371, + '应': 31370, + '失': 31369, + '注': 31368, + '研': 31367, + '完': 31366, + '津': 31365, + 'โ': 31364, + '軍': 31363, + '미': 31362, + '配': 31361, + '属': 31360, + '基': 31359, + '务': 31358, + '線': 31357, + '那': 31356, + 'ʷ': 31355, + '은': 31354, + '\\u2028': 31353, + '无': 31352, + '╔': 31351, + 'अ': 31350, + '义': 31349, + '\\x9c': 31348, + '久': 31347, + '오': 31346, + '선': 31345, + 'ད': 31344, + 'ề': 31343, + 'അ': 31342, + 'ἔ': 31341, + 'ု': 31340, + 'ך': 31339, + '堂': 31338, + '仁': 31337, + 'ʐ': 31336, + 'ゲ': 31335, + '공': 31334, + '选': 31333, + 'ῥ': 31332, + '向': 31331, + 'ष': 31330, + 'ट': 31329, + '张': 31328, + '우': 31327, + 'བ': 31326, + '而': 31325, + 'ា': 31324, + 'թ': 31323, + '雄': 31322, + '九': 31321, + '结': 31320, + '□': 31319, + 'ứ': 31318, + '̪': 31317, + '⊥': 31316, + '佐': 31315, + 'Ṣ': 31314, + '火': 31313, + 'ゃ': 31312, + 'Ű': 31311, + 'ข': 31310, + 'ϵ': 31309, + '伊': 31308, + 'Հ': 31307, + '제': 31306, + '形': 31305, + '六': 31304, + 'ĝ': 31303, + '提': 31302, + '්': 31301, + '龙': 31300, + '장': 31299, + 'び': 31298, + 'ᴇ': 31297, + '宗': 31296, + '未': 31295, + '容': 31294, + '국': 31293, + 'င': 31292, + '陽': 31291, + '已': 31290, + '┤': 31289, + '영': 31288, + 'ひ': 31287, + '을': 31286, + '연': 31285, + 'ള': 31284, + '录': 31283, + '▲': 31282, + '‾': 31281, + 'ớ': 31280, + '부': 31279, + 'ʌ': 31278, + '符': 31277, + '消': 31276, + '♣': 31275, + '學': 31274, + '修': 31273, + '由': 31272, + 'ქ': 31271, + 'ヴ': 31270, + '╝': 31269, + '调': 31268, + '与': 31267, + '华': 31266, + 'ὲ': 31265, + '改': 31264, + '组': 31263, + '신': 31262, + '̄': 31261, + '府': 31260, + '典': 31259, + 'ヤ': 31258, + 'ἄ': 31257, + 'գ': 31256, + 'ギ': 31255, + 'ば': 31254, + 'ன': 31253, + 'ไ': 31252, + 'ヒ': 31251, + 'ど': 31250, + 'வ': 31249, + 'ਾ': 31248, + 'ძ': 31247, + 'შ': 31246, + '➜': 31245, + '先': 31244, + '言': 31243, + '\\x81': 31242, + '夏': 31241, + '君': 31240, + '龍': 31239, + '就': 31238, + '命': 31237, + '○': 31236, + 'լ': 31235, + '▸': 31234, + 'မ': 31233, + 'ར': 31232, + '구': 31231, + '∫': 31230, + '户': 31229, + 'ေ': 31228, + '阿': 31227, + 'ە': 31226, + '화': 31225, + '≃': 31224, + 'ல': 31223, + '网': 31222, + '他': 31221, + '後': 31220, + 'ὁ': 31219, + 'য': 31218, + '条': 31217, + '╩': 31216, + '╗': 31215, + '̣': 31214, + '查': 31213, + 'ұ': 31212, + '̥': 31211, + 'Û': 31210, + '無': 31209, + 'ག': 31208, + '나': 31207, + 'ろ': 31206, + 'ポ': 31205, + 'দ': 31204, + '男': 31203, + '〜': 31202, + '解': 31201, + '⊕': 31200, + '보': 31199, + '원': 31198, + '라': 31197, + '博': 31196, + '实': 31195, + 'ׁ': 31194, + '源': 31193, + '見': 31192, + '否': 31191, + 
'常': 31190, + '소': 31189, + '↵': 31188, + '華': 31187, + '∼': 31186, + '系': 31185, + '等': 31184, + '码': 31183, + '放': 31182, + '土': 31181, + '量': 31180, + ' 園': 31179, + '⊢': 31178, + '트': 31177, + '夫': 31176, + '限': 31175, + '进': 31174, + '歌': 31173, + 'ピ': 31172, + '☺': 31171, + '전': 31170, + '德': 31169, + '格': 31168, + 'ʀ': 31167, + '单': 31166, + 'ɣ': 31165, + 'ட': 31164, + '朝': 31163, + 'Ť': 31162, + '館': 31161, + 'ắ': 31160, + '千': 31159, + '상': 31158, + '直': 31157, + '永': 31156, + '្': 31155, + 'ু': 31154, + '일': 31153, + '除': 31152, + '流': 31151, + 'ত': 31150, + '其': 31149, + 'স': 31148, + 'Ъ': 31147, + 'ണ': 31146, + 'ấ': 31145, + '英': 31144, + '长': 31143, + 'ậ': 31142, + '特': 31141, + '皇': 31140, + 'վ': 31139, + '过': 31138, + '고': 31137, + '도': 31136, + '♂': 31135, + ' 功': 31134, + '象': 31133, + 'च': 31132, + '義': 31131, + 'ხ': 31130, + '어': 31129, + '╦': 31128, + 'Ə': 31127, + '성': 31126, + '参': 31125, + '動': 31124, + 'ザ': 31123, + '片': 31122, + '福': 31121, + '初': 31120, + '┘': 31119, + '∅': 31118, + '期': 31117, + '،': 31116, + 'じ': 31115, + '♯': 31114, + '香': 31113, + '谷': 31112, + 'や': 31111, + 'そ': 31110, + '周': 31109, + '県': 31108, + '利': 31107, + 'ച': 31106, + 'ũ': 31105, + 'ོ': 31104, + '郡': 31103, + '김': 31102, + '程': 31101, + '更': 31100, + 'ң': 31099, + '魔': 31098, + '̲': 31097, + '志': 31096, + 'せ': 31095, + '↳': 31094, + '서': 31093, + '接': 31092, + 'ό': 31091, + '風': 31090, + '≫': 31089, + '请': 31088, + '馬': 31087, + '返': 31086, + '色': 31085, + '指': 31084, + '∗': 31083, + '┐': 31082, + '는': 31081, + 'ֶ': 31080, + 'ℓ': 31079, + 'Ù': 31078, + 'ғ': 31077, + '好': 31076, + '門': 31075, + ' 力': 31074, + 'แ': 31073, + '制': 31072, + '校': 31071, + 'ภ': 31070, + '間': 31069, + 'わ': 31068, + '♠': 31067, + '外': 31066, + 'ֵ': 31065, + 'ὴ': 31064, + '니': 31063, + '标': 31062, + 'ベ': 31061, + '∑': 31060, + 'έ': 31059, + 'ġ': 31058, + '关': 31057, + 'ṛ': 31056, + 'ল': 31055, + '에': 31054, + 'ာ': 31053, + '氏': 31052, + 'ソ': 31051, + '得': 31050, + '記': 31049, + '☆': 31048, + '百': 31047, + '画': 31046, + '場': 31045, + ' 八': 31044, + '知': 31043, + 'ά': 31042, + '工': 31041, + 'ĩ': 31040, + 'း': 31039, + 'ネ': 31038, + '台': 31037, + 'ɒ': 31036, + 'ศ': 31035, + 'ས': 31034, + '吉': 31033, + '治': 31032, + '春': 31031, + '科': 31030, + 'კ': 31029, + 'ワ': 31028, + 'ტ': 31027, + '开': 31026, + '列': 31025, + '获': 31024, + '教': 31023, + '少': 31022, + '息': 31021, + '始': 31020, + 'ṃ': 31019, + '松': 31018, + 'fi': 31017, + '间': 31016, + 'ா': 31015, + '政': 31014, + '자': 31013, + 'ब': 31012, + 'Ա': 31011, + 'ป': 31010, + 'श': 31009, + 'ļ': 31008, + '『': 31007, + 'ম': 31006, + '』': 31005, + '宮': 31004, + 'ボ': 31003, + '┌': 31002, + 'Υ': 31001, + '동': 31000 +} + +# def visualize_point_cloud(pc_data, convert_to_0_1=False): +# # assume input RGB is in [0, 1] + +# # in case input RGB is [-1, 1] +# if convert_to_0_1: +# pc_data = (pc_data + 1.0) / 2.0 + +# pc_data = pc_data[:, :6] +# pcd = o3d.geometry.PointCloud() + +# # Extract XYZ and RGB data from the numpy tensor +# xyz = pc_data[:, :3] +# rgb = pc_data[:, 3:] + +# # Assign XYZ and RGB data to the PointCloud object +# pcd.points = o3d.utility.Vector3dVector(xyz) +# pcd.colors = o3d.utility.Vector3dVector(rgb) + +# # Visualize the point cloud +# o3d.visualization.draw_geometries([pcd]) + + +def preprocess_2d(img, size=(224, 224)): + # img: (H, W, 3) + # resize, normalize + img = cv2.resize(img, size) + img = (img / 255 - PIX_MEAN) / PIX_STD + return np.ascontiguousarray(img.transpose(2, 0, 1)) + + +def recover_2d(img): + # img: (H, W, 3) + img = (img * PIX_STD + 
PIX_MEAN) * 255.0
+    return np.ascontiguousarray(img.astype(np.uint8))
+
+
+def transform_point_cloud(points, translation, orientation):
+    # points: (N, 3)
+    # translation: (3,)
+    # orientation: (4,)
+    translation = np.array(translation)
+    orientation = np.quaternion(*orientation)
+
+    # Convert the orientation quaternion to a rotation matrix
+    rotation_matrix = quaternion.as_rotation_matrix(orientation)
+
+    # Apply the rotation and translation to each point in the point cloud
+    rotated_points = np.dot(
+        points, rotation_matrix.T
+    )  # Transpose the rotation matrix for right multiplication
+    translated_points = rotated_points + translation
+
+    return translated_points
+
+
+def convert_pc_to_box(obj_pc):
+    xmin = np.min(obj_pc[:, 0])
+    ymin = np.min(obj_pc[:, 1])
+    zmin = np.min(obj_pc[:, 2])
+    xmax = np.max(obj_pc[:, 0])
+    ymax = np.max(obj_pc[:, 1])
+    zmax = np.max(obj_pc[:, 2])
+    center = [(xmin + xmax) / 2, (ymin + ymax) / 2, (zmin + zmax) / 2]
+    box_size = [xmax - xmin, ymax - ymin, zmax - zmin]
+    return center, box_size
+
+
+def build_rotate_mat(split, rot_aug=True, rand_angle='axis'):
+    if rand_angle == 'random':
+        theta = np.random.rand() * np.pi * 2
+    else:
+        ROTATE_ANGLES = [0, np.pi / 2, np.pi, np.pi * 3 / 2]
+        theta = random.choice(ROTATE_ANGLES)
+    if rot_aug and (split == 'train') and (theta is not None) and (theta != 0):
+        rot_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
+                               [np.sin(theta), np.cos(theta), 0], [0, 0, 1]],
+                              dtype=np.float32)
+    else:
+        rot_matrix = None
+    return rot_matrix
+
+
+def eval_ref_one_sample(pred_bbox, gt_bbox):
+    """Evaluate one reference prediction.
+
+    Args:
+        pred_bbox: 8 corners of the predicted bounding box, (8, 3)
+        gt_bbox: 8 corners of the ground-truth bounding box, (8, 3)
+    Returns:
+        iou: intersection over union score
+    """
+
+    iou = box3d_iou(pred_bbox, gt_bbox)
+
+    return iou
+
+
+def get_box3d_min_max(corner):
+    """Compute min and max coordinates of a 3D bounding box.
+
+    Note: only for axis-aligned bounding boxes
+    Input:
+        corner: numpy array (8,3), assume up direction is Z
+    Output:
+        x_min, x_max, y_min, y_max, z_min, z_max of the 3D bounding box
+    """
+
+    min_coord = corner.min(axis=0)
+    max_coord = corner.max(axis=0)
+    x_min, x_max = min_coord[0], max_coord[0]
+    y_min, y_max = min_coord[1], max_coord[1]
+    z_min, z_max = min_coord[2], max_coord[2]
+
+    return x_min, x_max, y_min, y_max, z_min, z_max
+
+
+def box3d_iou(corners1, corners2):
+    """Compute 3D bounding box IoU.
+
+    Input:
+        corners1: numpy array (8,3), assume up direction is Z
+        corners2: numpy array (8,3), assume up direction is Z
+    Output:
+        iou: 3D bounding box IoU
+    """
+
+    x_min_1, x_max_1, y_min_1, y_max_1, z_min_1, z_max_1 = get_box3d_min_max(
+        corners1)
+    x_min_2, x_max_2, y_min_2, y_max_2, z_min_2, z_max_2 = get_box3d_min_max(
+        corners2)
+    xA = np.maximum(x_min_1, x_min_2)
+    yA = np.maximum(y_min_1, y_min_2)
+    zA = np.maximum(z_min_1, z_min_2)
+    xB = np.minimum(x_max_1, x_max_2)
+    yB = np.minimum(y_max_1, y_max_2)
+    zB = np.minimum(z_max_1, z_max_2)
+    inter_vol = np.maximum((xB - xA), 0) * np.maximum(
+        (yB - yA), 0) * np.maximum((zB - zA), 0)
+    box_vol_1 = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) * (z_max_1 - z_min_1)
+    box_vol_2 = (x_max_2 - x_min_2) * (y_max_2 - y_min_2) * (z_max_2 - z_min_2)
+    iou = inter_vol / (box_vol_1 + box_vol_2 - inter_vol + 1e-8)
+
+    return iou
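+
+
+# NOTE: a quick sanity check (illustrative only):
+#   b1 = construct_bbox_corners([0, 0, 0], [2, 2, 2])
+#   b2 = construct_bbox_corners([1, 0, 0], [2, 2, 2])
+#   box3d_iou(b1, b2)  # -> ~0.3333 (intersection 4 / union 12)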
+def transform_points(points, transform, translate=True, mode='numpy'):
+    """Apply a linear transform to an array of points.
+
+    Args:
+        points (np array [..., 3]): Points to transform.
+        transform (np array [3, 4] or [4, 4]): Linear map.
+        translate (bool): If False, do not apply the translation component
+            of the transform.
+    Returns:
+        transformed points (np array [..., 3])
+    """
+    # Append ones or zeros to get homogeneous coordinates
+    if translate:
+        if mode == 'numpy':
+            constant_term = np.ones_like(points[..., :1])
+        else:
+            constant_term = torch.ones_like(points[..., :1])
+    else:
+        if mode == 'numpy':
+            constant_term = np.zeros_like(points[..., :1])
+        else:
+            constant_term = torch.zeros_like(points[..., :1])
+    if mode == 'numpy':
+        points = np.concatenate((points, constant_term), axis=-1)
+        points = np.einsum('nm,...m->...n', transform, points)
+    else:
+        points = torch.cat((points, constant_term), dim=-1)
+        points = torch.einsum('...nm,...m->...n', transform, points)
+    return points[..., :3]
+
+
+def construct_bbox_corners(center, box_size):
+    sx, sy, sz = box_size
+    x_corners = [
+        sx / 2, sx / 2, -sx / 2, -sx / 2, sx / 2, sx / 2, -sx / 2, -sx / 2
+    ]
+    y_corners = [
+        sy / 2, -sy / 2, -sy / 2, sy / 2, sy / 2, -sy / 2, -sy / 2, sy / 2
+    ]
+    z_corners = [
+        sz / 2, sz / 2, sz / 2, sz / 2, -sz / 2, -sz / 2, -sz / 2, -sz / 2
+    ]
+    corners_3d = np.vstack([x_corners, y_corners, z_corners])
+    corners_3d[0, :] = corners_3d[0, :] + center[0]
+    corners_3d[1, :] = corners_3d[1, :] + center[1]
+    corners_3d[2, :] = corners_3d[2, :] + center[2]
+    corners_3d = np.transpose(corners_3d)
+
+    return corners_3d
+
+
+def is_explicitly_view_dependent(tokens):
+    """
+    :return: True if any token is an explicitly view-dependent word
+    """
+    target_words = {
+        'front', 'behind', 'back', 'right', 'left', 'facing', 'leftmost',
+        'rightmost', 'looking', 'across'
+    }
+    for token in tokens:
+        if token in target_words:
+            return True
+    return False
+
+
+def load_matrix_from_txt(path, shape=(4, 4)):
+    with open(path) as f:
+        txt = f.readlines()
+    txt = ''.join(txt).replace('\n', ' ')
+    matrix = [float(v) for v in txt.split()]
+    return np.array(matrix).reshape(shape)
+
+
+def pad_tensors(tensors, dim=0, lens=None, pad=0):
+    assert tensors.shape[dim] <= lens
+    if tensors.shape[dim] == lens:
+        return tensors
+    shape = list(tensors.shape)
+    shape[dim] = lens - shape[dim]
+    res = torch.ones(shape, dtype=tensors.dtype, device=tensors.device) * pad
+    res = torch.cat([tensors, res], dim=dim)
+    return res
+
+
+def get_sqa_question_type(question):
+    question = question.lstrip()
+    if question[:4].lower() == 'what':
+        return 0
+    elif question[:2].lower() == 'is':
+        return 1
+    elif question[:3].lower() == 'how':
+        return 2
+    elif question[:3].lower() == 'can':
+        return 3
+    elif question[:5].lower() == 'which':
+        return 4
+    else:
+        return 5  # others
+
+
+def vis_scene_qa(save_path,
+                 scene_path,
+                 question,
+                 answer_gt,
+                 answer_candidate,
+                 generate_tokens,
+                 generate_text,
+                 situation=None):
+    # scene
+    scene = tm.load(scene_path)
+    v = np.array(scene.vertices)
+    scene_center = (np.max(v[:, 0:3], axis=0) + np.min(v[:, 0:3], axis=0)) / 2
+    scene_transformation = np.array([[1, 0, 0, -scene_center[0]],
+                                     [0, 1, 0, -scene_center[1]],
+                                     [0, 0, 1, -scene_center[2]],
+                                     [0, 0, 0, 1]])
+    scene = scene.apply_transform(scene_transformation)
+
+    # situation
+    if situation is not None:
+        # (description, position, rotation)
+        position = situation[1]
+        rotation = situation[2]
+        quaternion = [
+            rotation['_x'].item(), rotation['_y'].item(),
+            rotation['_z'].item(), rotation['_w'].item()
+        ]
+        rot_mat_3x3 = R.from_quat(quaternion).as_matrix()
+        rotation_matrix = np.array(
+            [[rot_mat_3x3[0][0], rot_mat_3x3[0][1],
rot_mat_3x3[0][2], 0], + [rot_mat_3x3[1][0], rot_mat_3x3[1][1], rot_mat_3x3[1][2], 0], + [rot_mat_3x3[2][0], rot_mat_3x3[2][1], rot_mat_3x3[2][2], 0], + [0, 0, 0, 1]]) + transformation_matrix = np.array([[1, 0, 0, position['x'].item()], + [0, 1, 0, position['y'].item()], + [0, 0, 1, 0], [0, 0, 0, 1]]) + + # create arrow + cone = tm.creation.cone(radius=0.1, + height=0.20, + sections=None, + transform=None) + rotate_around_y = np.array([[0, 0, -1, 0], [0, 1, 0, 0], [1, 0, 0, 0], + [0, 0, 0, 1]]) + cone = cone.apply_transform(rotate_around_y) + cylinder = tm.creation.cylinder(radius=0.06, + height=0.30, + sections=None, + segment=None, + transform=None) + cylinder = cylinder.apply_transform(rotate_around_y) + mv_2_head = np.array([[1, 0, 0, -0.15], [0, 1, 0, 0], [0, 0, 1, 0], + [0, 0, 0, 1]]) + cone = cone.apply_transform(mv_2_head) + arrow = tm.util.concatenate([cone, cylinder]) + + # apply situation + this_arrow = arrow.apply_transform(rotation_matrix) + this_arrow = this_arrow.apply_transform(transformation_matrix) + this_arrow.visual.vertex_colors = np.zeros((100, 4)) + this_arrow.visual.vertex_colors[:, 0] = 0 + this_arrow.visual.vertex_colors[:, 1] = 255 + this_arrow.visual.vertex_colors[:, 2] = 0 + this_arrow.visual.vertex_colors[:, 3] = 255 + + scene = tm.util.concatenate([scene, this_arrow]) + + scene.export(f'{save_path}.ply') + save_dict = { + 'situation': situation[0] if situation is not None else None, + 'question': question, + 'answer_gt': answer_gt, + 'answer_candidate': answer_candidate, + 'generate_tokens': generate_tokens, + 'generate_text': generate_text, + } + np.save(f'{save_path}.npy', save_dict) + + +def clean_answer(data): + data = data.lower() + data = re.sub('[ ]+$', '', data) + data = re.sub('^[ ]+', '', data) + data = re.sub(' {2,}', ' ', data) + + data = re.sub('\.[ ]{2,}', '. 
', data) + data = re.sub('[^a-zA-Z0-9,\'\s\-:]+', '', data) + data = re.sub('ç', 'c', data) + data = re.sub('’', '\'', data) + data = re.sub(r'\bletf\b', 'left', data) + data = re.sub(r'\blet\b', 'left', data) + data = re.sub(r'\btehre\b', 'there', data) + data = re.sub(r'\brigth\b', 'right', data) + data = re.sub(r'\brght\b', 'right', data) + data = re.sub(r'\bbehine\b', 'behind', data) + data = re.sub(r'\btv\b', 'TV', data) + data = re.sub(r'\bchai\b', 'chair', data) + data = re.sub(r'\bwasing\b', 'washing', data) + data = re.sub(r'\bwaslked\b', 'walked', data) + data = re.sub(r'\boclock\b', 'o\'clock', data) + data = re.sub(r'\bo\'[ ]+clock\b', 'o\'clock', data) + + # digit to word, only for answer + data = re.sub(r'\b0\b', 'zero', data) + data = re.sub(r'\bnone\b', 'zero', data) + data = re.sub(r'\b1\b', 'one', data) + data = re.sub(r'\b2\b', 'two', data) + data = re.sub(r'\b3\b', 'three', data) + data = re.sub(r'\b4\b', 'four', data) + data = re.sub(r'\b5\b', 'five', data) + data = re.sub(r'\b6\b', 'six', data) + data = re.sub(r'\b7\b', 'seven', data) + data = re.sub(r'\b8\b', 'eight', data) + data = re.sub(r'\b9\b', 'nine', data) + data = re.sub(r'\b10\b', 'ten', data) + data = re.sub(r'\b11\b', 'eleven', data) + data = re.sub(r'\b12\b', 'twelve', data) + data = re.sub(r'\b13\b', 'thirteen', data) + data = re.sub(r'\b14\b', 'fourteen', data) + data = re.sub(r'\b15\b', 'fifteen', data) + data = re.sub(r'\b16\b', 'sixteen', data) + data = re.sub(r'\b17\b', 'seventeen', data) + data = re.sub(r'\b18\b', 'eighteen', data) + data = re.sub(r'\b19\b', 'nineteen', data) + data = re.sub(r'\b20\b', 'twenty', data) + data = re.sub(r'\b23\b', 'twenty-three', data) + + # misc + # no1, mat2, etc + data = re.sub(r'\b([a-zA-Z]+)([0-9])\b', r'\g<1>', data) + data = re.sub(r'\ba\b ([a-zA-Z]+)', r'\g<1>', data) + data = re.sub(r'\ban\b ([a-zA-Z]+)', r'\g<1>', data) + data = re.sub(r'\bthe\b ([a-zA-Z]+)', r'\g<1>', data) + + data = re.sub(r'\bbackwards\b', 'backward', data) + + return data diff --git a/models/LEO/data/dataset_wrapper.py b/models/LEO/data/dataset_wrapper.py new file mode 100755 index 0000000..1eb92bb --- /dev/null +++ b/models/LEO/data/dataset_wrapper.py @@ -0,0 +1,30 @@ +import torch +from torch.utils.data import Dataset + +from .build import DATASETWRAPPER_REGISTRY +from .data_utils import pad_tensors + + +@DATASETWRAPPER_REGISTRY.register() +class LeoObjPadDatasetWrapper(Dataset): + + def __init__(self, dataset, args): + self.dataset = dataset + self.max_obj_len = args.max_obj_len + + def __len__(self): + return len(self.dataset) + + def __getitem__(self, idx): + data_dict = self.dataset[idx] + + data_dict['obj_fts'] = pad_tensors(data_dict['obj_fts'], + lens=self.max_obj_len, + pad=1.0).float() # O, num_points, 6 + data_dict['obj_masks'] = ( + torch.arange(self.max_obj_len) < len(data_dict['obj_locs'])) # O + data_dict['obj_locs'] = pad_tensors(data_dict['obj_locs'], + lens=self.max_obj_len, + pad=0.0).float() # O, 6 + + return data_dict diff --git a/models/LEO/data/datasets.py b/models/LEO/data/datasets.py new file mode 100755 index 0000000..47d2f1f --- /dev/null +++ b/models/LEO/data/datasets.py @@ -0,0 +1,2397 @@ +import glob +import json +import os +import pickle +import random +from copy import deepcopy + +import cv2 +import nltk +import numpy as np +import pandas as pd +import torch +from accelerate.logging import get_logger +from einops import rearrange +from scipy import sparse +from scipy.spatial.transform import Rotation as R +from torch.utils.data import Dataset + +from 
.build import DATASET_REGISTRY
+from .data_utils import (build_rotate_mat, construct_bbox_corners,
+                         convert_pc_to_box, eval_ref_one_sample,
+                         get_sqa_question_type, preprocess_2d)
+from .eai import (_DUMMY_CLIPORT_ACTION, CLIPORT_ACTION_SPACE_TOKENIZE,
+                  HABITAT_ACTION_SPACE, HABITAT_ACTION_SPACE_TOKENIZE,
+                  _extract_between, shapenetcore_pp)
+from .text_pool import *
+
+logger = get_logger(__name__)
+
+# len(tokenized_sentence) / len(sentence)
+LLAMA_TOKEN_SENT_RATIO = 0.24
+
+LEOMIX_REQUIRED_KEYS = [
+    'source',
+    'prompt_before_obj',
+    'prompt_middle_1',
+    'prompt_middle_2',
+    'prompt_after_obj',
+    'obj_fts',
+    # 'obj_masks',  # this is filled by the dataset wrapper
+    'obj_locs',
+    'anchor_locs',
+    'anchor_orientation',
+    'img_fts',  # currently hardcoded to 224x224
+    'img_masks',
+    'output_gt',
+]
+
+
+@DATASET_REGISTRY.register()
+class LeoBase(Dataset):
+    r"""Unified input format:
+    <prompt_before_obj> + <prompt_middle_1> + <img_tokens> + <prompt_middle_2> + <obj_tokens> + <prompt_after_obj> + <response>
+
+    <prompt_before_obj>: <role_prompt> <situation_prompt>
+    <prompt_middle_1>: <egoview_prompt> (masked if unnecessary)
+    <prompt_middle_2>: <objects_prompt>
+    <prompt_after_obj>: <task_prompt>
+    <response>: response label, will be appended to input sequence for computing loss during training
+    """
+
+    role_prompt = 'You are an AI visual assistant situated in a 3D scene. '\
+        'You can perceive (1) an ego-view image (accessible when necessary) and (2) the objects (including yourself) in the scene (always accessible). '\
+        "You should properly respond to the USER's instruction according to the given visual information. "
+    situation_prompt = '{situation}'
+    egoview_prompt = 'Ego-view image:'
+    objects_prompt = 'Objects (including you) in the scene:'
+    task_prompt = 'USER: {instruction} ASSISTANT:'
+
+    @staticmethod
+    def get_prompts(instruction, situation='', dialogue=None):
+        return {
+            'prompt_before_obj':
+            LeoBase.role_prompt +
+            LeoBase.situation_prompt.format(situation=situation),
+            'prompt_middle_1':
+            LeoBase.egoview_prompt,
+            'prompt_middle_2':
+            LeoBase.objects_prompt,
+            'prompt_after_obj':
+            LeoBase.task_prompt.format(
+                instruction=instruction) if dialogue is None else dialogue,
+        }
+
+    @staticmethod
+    def check_output_and_fill_dummy(data_dict):
+        if 'anchor_locs' not in data_dict:
+            data_dict['anchor_locs'] = torch.zeros(3)
+        if 'anchor_orientation' not in data_dict:
+            data_dict['anchor_orientation'] = torch.zeros(4)
+            data_dict['anchor_orientation'][-1] = 1  # xyzw
+        if 'img_fts' not in data_dict:
+            data_dict['img_fts'] = torch.zeros(
+                3, 224, 224)  # currently hardcoded to 224x224
+        if 'img_masks' not in data_dict:
+            data_dict['img_masks'] = torch.LongTensor([0]).bool()
+
+        for key in LEOMIX_REQUIRED_KEYS:
+            if key not in data_dict:
+                raise ValueError(f'Key {key} is missing in LeoMix data_dict')
+        return data_dict
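+
+    # NOTE: an illustrative example (role prompt abbreviated):
+    #   LeoBase.get_prompts(instruction='Describe this scene.')
+    #   -> {'prompt_before_obj': 'You are an AI visual assistant ... ',
+    #       'prompt_middle_1': 'Ego-view image:',
+    #       'prompt_middle_2': 'Objects (including you) in the scene:',
+    #       'prompt_after_obj': 'USER: Describe this scene. ASSISTANT:'}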
+    def load_rscan(self, scan_id):
+        scan_path = os.path.join(self.rscan_base, '3RScan-ours-align', scan_id)
+        pcd_data = torch.load(os.path.join(scan_path, 'pcd-align.pth'))
+        points, colors, instance_labels = pcd_data[0], pcd_data[1], pcd_data[2]
+        colors = colors / 127.5 - 1
+        pcds = np.concatenate([points, colors], 1)
+        inst_to_label = torch.load(os.path.join(scan_path,
+                                                'inst_to_label.pth'))
+
+        # build obj_pcds
+        obj_pcds = {}
+        for inst_id in inst_to_label.keys():
+            mask = instance_labels == inst_id
+            obj_pcds.update({inst_id: pcds[mask]})
+
+        return {'obj_pcds': obj_pcds}
+
+    def load_scannet(self, scan_id):
+        scan = {}
+        pcd_data = torch.load(
+            os.path.join('../../data/mmscan_scenes', f'{scan_id}.pth'))
+        points, colors, instance_labels = pcd_data[0], pcd_data[1], pcd_data[-1]
+        colors = colors * 2 - 1
+        pcds = np.concatenate([points, colors], 1)
+        scan['pcds'] = deepcopy(pcds)
+        obj_pcds = {}
+        for i in range(instance_labels.max() + 1):
+            mask = instance_labels == i
+            obj_pcds.update({i: pcds[mask]})
+
+        scan['obj_pcds'] = obj_pcds
+        scan['scene_center'] = (points.max(0) + points.min(0)) / 2
+
+        if hasattr(self, 'pc_type') and self.pc_type == 'pred':
+            # Mask3D proposals
+            mask_path = os.path.join(self.scannet_base, 'mask',
+                                     f'{str(scan_id)}.mask.npz')
+            obj_masks = np.array(sparse.load_npz(mask_path).todense())[:50, :]
+            obj_pcds_pred = []
+            for i in range(obj_masks.shape[0]):
+                mask = obj_masks[i]
+                obj_pcds_pred.append(pcds[mask == 1, :])
+            scan['obj_pcds_pred'] = obj_pcds_pred
+
+        return scan
+
+    def preprocess_pcd(self, obj_pcds, return_anchor=False, rot_aug=True):
+        # rotate scene
+        rot_matrix = build_rotate_mat(self.split, rot_aug=rot_aug)
+        # normalize pc and calculate location
+        obj_fts = []
+        obj_locs = []
+        anchor_loc = None
+        for i, obj_pcd in enumerate(obj_pcds):
+
+            if rot_matrix is not None:
+                obj_pcd[:, :3] = np.matmul(obj_pcd[:, :3],
+                                           rot_matrix.transpose())
+
+            obj_center = obj_pcd[:, :3].mean(0)
+            obj_size = obj_pcd[:, :3].max(0) - obj_pcd[:, :3].min(0)
+            obj_locs.append(np.concatenate([obj_center, obj_size], 0))
+            if return_anchor and i == 0:
+                # Select a loc within the obj bbox as the anchor.
+                anchor_loc = obj_pcd[:, :3].min(
+                    0) + np.random.rand(3) * obj_size
+
+            # subsample
+            pcd_idxs = np.random.choice(len(obj_pcd),
+                                        size=self.num_points,
+                                        replace=len(obj_pcd) < self.num_points)
+            obj_pcd = obj_pcd[pcd_idxs]
+
+            # normalize
+            obj_pcd[:, :3] = obj_pcd[:, :3] - obj_pcd[:, :3].mean(0)
+            max_dist = np.sqrt((obj_pcd[:, :3]**2).sum(1)).max()
+            if max_dist < 1e-6:  # take care of tiny point-clouds, i.e., padding
+                max_dist = 1
+            obj_pcd[:, :3] = obj_pcd[:, :3] / max_dist
+            obj_fts.append(obj_pcd)
+
+        # convert to torch
+        obj_fts = torch.from_numpy(np.stack(obj_fts, 0)).float()
+        obj_locs = torch.from_numpy(np.array(obj_locs)).float()
+
+        if return_anchor and anchor_loc is not None:
+            anchor_loc = torch.from_numpy(anchor_loc).float()
+        else:
+            anchor_loc = torch.zeros(3)
+
+        return obj_fts, obj_locs, anchor_loc
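+
+    # NOTE: an illustrative call (assuming two (N, 6) xyz+rgb arrays):
+    #   obj_fts, obj_locs, anchor = self.preprocess_pcd([pcd_a, pcd_b],
+    #                                                   return_anchor=True)
+    #   obj_fts: (2, num_points, 6); obj_locs: (2, 6) as (center, size);
+    #   anchor: a random location inside the first object's bbox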
+    def _split_sentence(self, sentence, max_length, prefix=''):
+        # only split during training
+        if self.split == 'train' and len(prefix + sentence) > max_length:
+            all_caps = []
+            sents = sentence.split('. ')
+            tmp = prefix
+            for i in range(len(sents)):
+                if len(tmp + sents[i] + '. ') > max_length:
+                    all_caps.append(tmp)
+                    tmp = prefix
+                tmp += sents[i] + '. '
+
+            all_caps.append(tmp)  # last chunk
+
+            # final check
+            ret = []
+            for cap in all_caps:
+                if len(cap) <= max_length:
+                    ret.append(cap)
+            return ret
+        else:
+            return [prefix + sentence]
+
+
+# alignment
+
+
+@DATASET_REGISTRY.register()
+class LeoCap3D(LeoBase):
+    situation_pool = Leo_situation_pool
+    instruction_pool = Leo_objcap_instruction_pool
+
+    def __init__(self, cfg, split):
+        super().__init__()
+        self.split = split
+        self.cap3d_root = cfg.data.cap3d.cap3d_root
+        self.num_points = cfg.data.cap3d.num_points
+
+        logger.info(f'Loading LeoCap3D {split}-set language')
+        self.create_obj_cap_dict(self.cap3d_root)
+        if split == 'train':
+            self.obj_ids = self.obj_ids[:-1000]
+        else:
+            self.obj_ids = self.obj_ids[-1000:]
+        logger.info(
+            f'Finish loading LeoCap3D {split}-set language, collected {len(self.obj_ids)} data'
+        )
+
+    def create_obj_cap_dict(self, cap3d_root):
+        obj_csv = pd.read_csv(os.path.join(
+            cap3d_root, 'Cap3D_automated_Objaverse_no3Dword.csv'),
+                              header=None)
+        self.obj_ids = []
+        self.obj_cap_dict = {}
+        for obj_id, cap in zip(obj_csv[0].values, obj_csv[1].values):
+            # remove redundant quotation marks; we do not strip() directly
+            # because the mark may appear on only one side
+            if cap.startswith('"') and cap.endswith('"'):
+                cap = cap.strip('"')
+            elif cap.startswith("'") and cap.endswith("'"):
+                cap = cap.strip("'")
+
+            self.obj_ids.append(obj_id)
+            self.obj_cap_dict[obj_id] = cap
+
+    def load_obj_pcd(self, obj_id):
+        pcd = torch.load(os.path.join(self.cap3d_root,
+                                      f'Cap3D_pcs_pt/{obj_id}.pt'),
+                         map_location='cpu')  # (6, 16384)
+        pcd = rearrange(pcd, 'c n -> n c')  # (16384, 6), xyz (m) + rgb (uint8)
+        pcd[:,
+            3:] = pcd[:,
+                      3:] / 127.5 - 1  # (16384, 6), xyz (m) + rgb (float, [-1, 1])
+        return pcd
+
+    def __len__(self):
+        return len(self.obj_ids)
+
+    def __getitem__(self, index):
+        obj_id = self.obj_ids[index]
+        obj_pcd = self.load_obj_pcd(obj_id)
+        obj_fts, obj_locs, anchor_loc = self.preprocess_pcd([obj_pcd.numpy()],
+                                                            return_anchor=True)
+
+        data_dict = self.get_prompts(
+            instruction=random.choice(self.instruction_pool),
+            situation=random.choice(self.situation_pool),
+        )
+        data_dict.update({
+            'source': 'objaverse',
+            'scene_id': obj_id,
+            'obj_fts': obj_fts,
+            'obj_locs': obj_locs,
+            'anchor_locs': anchor_loc,
+            'img_fts': torch.zeros(3, 224, 224),
+            'img_masks': torch.LongTensor([0]).bool(),
+            'output_gt': self.obj_cap_dict[obj_id],
+        })
+        return self.check_output_and_fill_dummy(data_dict)
+
+
+@DATASET_REGISTRY.register()
+class LeoObjSceneCap(LeoBase):
+    situation_pool = Leo_situation_pool
+    instruction_pool = Leo_objcap_instruction_pool
+
+    def __init__(self, cfg, split='train'):
+        super().__init__()
+        assert split == 'train', 'LeoObjSceneCap only supports training during the alignment stage'
+        self.split = split
+        self.rscan_base = cfg.data.obj_scene_cap.rscan_base
+        self.scannet_base = cfg.data.obj_scene_cap.scannet_base
+        self.num_points = cfg.data.obj_scene_cap.num_points
+        self.max_obj_len = cfg.data.obj_scene_cap.max_obj_len
+        self.max_caption_length = int(cfg.llm.max_out_len /
+                                      LLAMA_TOKEN_SENT_RATIO)
+
+        logger.info('Loading LeoObjSceneCap train-set language')
+        self.scan_ids, self.lang_data = self.load_anno(
+            cfg.data.obj_scene_cap.anno_dir)
+        # scan_ids may be repetitive
+        logger.info(
+            f'Finish loading LeoObjSceneCap train-set language, collected {len(self.scan_ids)} data'
+        )
+
+        self.scan_data = {'3rscan': {}, 'scannet': {}}
+
+    def load_anno(self, anno_dir):
+        # may contain both 3RScan and ScanNet
+        scan_ids = []
+        scan_caps = []
+        for
fname in os.listdir(anno_dir): + with open(os.path.join(anno_dir, fname)) as f: + json_data = json.load(f) + if '3rscan' in fname.lower(): + if 'scanscribe' in fname.lower(): + for meta_anno in json_data: + cap = meta_anno['sentence'] + all_caps = self._split_sentence( + sentence='. '.join(cap.split('. ')[1:]), + max_length=self.max_caption_length, + prefix=cap.split('. ')[0] + '. ', + ) + for c in all_caps: + scan_ids.append({ + 'source': '3rscan', + 'scan_id': meta_anno['scan_id'], + }) + scan_caps.append({ + 'obj_id': meta_anno['object_id'], + 'caption': c, + }) + else: + # 3rscan_prompted + for k, v in json_data.items(): + for obj_str, obj_v in v.items(): + obj_id = int(obj_str.split('-')[-1]) + for meta_anno in obj_v: + cap = meta_anno['response'] + all_caps = self._split_sentence( + sentence='. '.join(cap.split('. ')[1:]), + max_length=self.max_caption_length, + prefix=cap.split('. ')[0] + '. ', + ) + for c in all_caps: + scan_ids.append({ + 'source': '3rscan', + 'scan_id': k, + }) + scan_caps.append({ + 'obj_id': obj_id, + 'caption': c, + }) + elif 'scannet' in fname.lower(): + # referit3d + for item in json_data: + obj_id = int(item['target_id']) + cap = item['utterance'] + all_caps = self._split_sentence( + sentence='. '.join(cap.split('. ')[1:]), + max_length=self.max_caption_length, + prefix=cap.split('. ')[0] + '. ', + ) + for c in all_caps: + scan_ids.append({ + 'source': 'scannet', + 'scan_id': item['scan_id'], + }) + scan_caps.append({ + 'obj_id': item['target_id'], + 'caption': c, + }) + + return scan_ids, scan_caps + + def __len__(self): + return len(self.scan_ids) + + def __getitem__(self, index): + scan_meta = self.scan_ids[index] + scan_source = scan_meta['source'] + scan_id = scan_meta['scan_id'] + + lang_meta = self.lang_data[index] + obj_id = lang_meta['obj_id'] + obj_caption = lang_meta['caption'] + + # load pcds + if scan_id not in self.scan_data[scan_source]: + if scan_source == '3rscan': + self.scan_data['3rscan'][scan_id] = self.load_rscan(scan_id) + elif scan_source == 'scannet': + self.scan_data['scannet'][scan_id] = self.load_scannet(scan_id) + obj_pcds = self.scan_data[scan_source][scan_id]['obj_pcds'].copy() + # Dict: { int: np.ndarray (N, 6) } + + selected_obj_pcds = [obj_pcds[obj_id]] + remained_obj_idx = [i for i in obj_pcds.keys() if i != obj_id] + if self.split == 'train': + random.shuffle(remained_obj_idx) + selected_obj_pcds.extend( + [obj_pcds[i] for i in remained_obj_idx[:self.max_obj_len - 1]]) + + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd(selected_obj_pcds, + return_anchor=True) + data_dict = self.get_prompts( + instruction=random.choice(self.instruction_pool), + situation=random.choice(self.situation_pool), + ) + data_dict.update({ + 'source': scan_source, + 'scene_id': scan_id, + 'obj_fts': obj_fts, + 'obj_locs': obj_locs, + 'anchor_locs': anchor_loc, + 'img_fts': torch.zeros(3, 224, 224), + 'img_masks': torch.LongTensor([0]).bool(), + 'output_gt': obj_caption, + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoSceneCap(LeoBase): + instruction_pool = Leo_scenecap_instruction_pool + + def __init__(self, cfg, split): + super().__init__() + self.split = 'train' if split == 'train' else 'val' + self.rscan_base = cfg.data.scene_cap.rscan_base + self.num_points = cfg.data.scene_cap.num_points + self.max_obj_len = cfg.data.scene_cap.max_obj_len + self.max_caption_length = int(cfg.llm.max_out_len / + LLAMA_TOKEN_SENT_RATIO) + + logger.info(f'Loading LeoSceneCap {split}-set language') + 
self.scan_ids, self.lang_data, self.scan_insts = self.load_anno(
+            cfg.data.scene_cap.anno_dir)
+        # scan_ids may be repetitive
+        logger.info(
+            f'Finish loading LeoSceneCap {split}-set language, collected {len(self.scan_ids)} data'
+        )
+
+        self.scan_data = {}
+
+    def load_anno(self, anno_dir):
+        scan_ids = []
+        scan_caps = []
+        scan_insts = []  # relevant instances
+        anno_file = os.path.join(anno_dir,
+                                 f'3rscan_scenecap_{self.split}.json')
+        with open(anno_file, 'r') as f:
+            json_data = json.load(f)
+        for k, v in json_data.items():
+            for meta_anno in v:
+                scene_graph = eval(meta_anno['query'])
+                insts = [int(s.split('-')[-1]) for s in scene_graph.keys()]
+
+                cap = meta_anno['response']
+                all_caps = self._split_sentence(
+                    sentence='. '.join(cap.split('. ')[1:]),
+                    max_length=self.max_caption_length,
+                    prefix=cap.split('. ')[0] + '. ',
+                )
+                for c in all_caps:
+                    scan_caps.append(c)
+                    scan_ids.append(k)
+                    scan_insts.append(insts)
+
+        return scan_ids, scan_caps, scan_insts
+
+    def __len__(self):
+        return len(self.scan_ids)
+
+    def __getitem__(self, index):
+        scan_id = self.scan_ids[index]
+        caption = self.lang_data[index]
+        scan_insts = self.scan_insts[index]
+
+        # load pcds
+        if scan_id not in self.scan_data:
+            self.scan_data[scan_id] = self.load_rscan(scan_id)
+        obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy()
+        # Dict: { int: np.ndarray (N, 6) }
+
+        selected_obj_pcds = [obj_pcds[i] for i in scan_insts]
+        num_selected_objs = len(selected_obj_pcds)
+        if num_selected_objs >= self.max_obj_len:
+            if self.split == 'train':
+                random.shuffle(selected_obj_pcds)
+            selected_obj_pcds = selected_obj_pcds[:self.max_obj_len]
+        else:
+            # select from remaining objs
+            remained_obj_idx = [
+                i for i in obj_pcds.keys() if i not in scan_insts
+            ]
+            if self.split == 'train':
+                random.shuffle(selected_obj_pcds)
+                random.shuffle(remained_obj_idx)
+            selected_obj_pcds.extend([
+                obj_pcds[i] for i in remained_obj_idx[:self.max_obj_len -
+                                                      num_selected_objs]
+            ])
+
+        obj_fts, obj_locs, anchor_loc = self.preprocess_pcd(
+            selected_obj_pcds, return_anchor=False)
+        data_dict = self.get_prompts(
+            instruction=random.choice(self.instruction_pool),
+            situation='',
+        )
+        data_dict.update({
+            'source': '3rscan',
+            'scene_id': scan_id,
+            'obj_fts': obj_fts,
+            'obj_locs': obj_locs,
+            'anchor_locs': anchor_loc,
+            'img_fts': torch.zeros(3, 224, 224),
+            'img_masks': torch.LongTensor([0]).bool(),
+            'output_gt': caption,
+        })
+
+        return self.check_output_and_fill_dummy(data_dict)
+
+
+# instruction tuning
+
+
+@DATASET_REGISTRY.register()
+class LeoScan2Cap(LeoBase):
+    situation_pool = Leo_situation_pool
+    instruction_pool = Leo_objcap_instruction_pool
+
+    def __init__(self, cfg, split):
+        super().__init__()
+        self.scannet_base = cfg.data.scan2cap.scannet_base
+        self.num_points = cfg.data.scan2cap.num_points
+        self.max_obj_len = cfg.data.scan2cap.max_obj_len
+        self.max_caption_length = int(cfg.llm.max_out_len /
+                                      LLAMA_TOKEN_SENT_RATIO)
+
+        if split == 'train':
+            self.split = 'train'
+            self.pc_type = 'gt'
+        else:
+            self.split = 'val'
+            self.pc_type = getattr(cfg.data.scan2cap, 'pc_type', 'gt')
+
+        self.iou_threshold = getattr(cfg.data.scan2cap, 'iou_thres', 0.5)
+
+        logger.info(f'Loading LeoScan2Cap {split}-set language')
+        self.scan_ids, self.lang_data, self.corpus_cache = self.load_anno(
+            cfg.data.scan2cap.anno_dir)
+        # scan_ids may be repetitive
+        logger.info(
+            f'Finish loading LeoScan2Cap {split}-set language, collected {len(self.scan_ids)} data'
+        )
+
+        self.scan_data = {}
+
+    def load_anno(self, anno_dir):
+        scan_ids
= [] + scan_caps = [] + corpus_cache = [] + anno_file = os.path.join(anno_dir, f'scanrefer_{self.split}.json') + with open(anno_file, 'r') as f: + json_data = json.load(f) + for item in json_data: + scan_id = item['scan_id'] + obj_id = int(item['target_id']) + obj_name = item['instance_type'] + key = f'{scan_id}|{obj_id}|{obj_name}' + if self.split != 'train' and key in corpus_cache: + continue + # only evaluate once per obj instance + corpus_cache.append(key) + cap = item['utterance'] + all_caps = self._split_sentence( + sentence='. '.join(cap.split('. ')[1:]), + max_length=self.max_caption_length, + prefix=cap.split('. ')[0] + '. ', + ) + for c in all_caps: + scan_ids.append(item['scan_id']) + scan_caps.append({ + 'obj_id': item['target_id'], + 'caption': c, + }) + + return scan_ids, scan_caps, corpus_cache + + def __len__(self): + return len(self.scan_ids) + + def __getitem__(self, index): + scan_id = self.scan_ids[index] + lang_meta = self.lang_data[index] + obj_id = lang_meta['obj_id'] + obj_caption = lang_meta['caption'] + corpus_key = self.corpus_cache[index] + + # load pcds + if scan_id not in self.scan_data: + self.scan_data[scan_id] = self.load_scannet(scan_id) + + if self.pc_type == 'gt': + iou_flag = 1 + obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy( + ) # Dict{int: np.ndarray (N, 6)} + selected_obj_pcds = [obj_pcds[obj_id]] + remained_obj_idx = [i for i in obj_pcds.keys() if i != obj_id] + else: + # only for evaluation with object proposals + obj_pcds = self.scan_data[scan_id]['obj_pcds_pred'].copy( + ) # List[np.ndarray (N, 6)] + gt_pcd = self.scan_data[scan_id]['obj_pcds'][obj_id].copy() + gt_center, gt_box_size = convert_pc_to_box(gt_pcd) + tgt_obj_id_pred = -1 + overlap_obj_id_list = [] + max_iou = self.iou_threshold + iou_flag = 0 + # find max iou + for i in range(len(obj_pcds)): + obj_center, obj_box_size = convert_pc_to_box(obj_pcds[i]) + current_iou = eval_ref_one_sample( + construct_bbox_corners(obj_center, obj_box_size), + construct_bbox_corners(gt_center, gt_box_size)) + if current_iou >= max_iou: + iou_flag = 1 + tgt_obj_id_pred = i + max_iou = current_iou + if current_iou >= 0.25: + # this list includes tgt_obj_id_pred, as long as iou_thres >= 0.25 + overlap_obj_id_list.append(i) + selected_obj_pcds = [obj_pcds[tgt_obj_id_pred]] + selected_obj_pcds.extend([ + obj_pcds[i] for i in overlap_obj_id_list + if i != tgt_obj_id_pred + ]) + remained_obj_idx = [ + i for i in range(len(obj_pcds)) if i not in overlap_obj_id_list + ] + + num_selected_obj = len(selected_obj_pcds) + if num_selected_obj >= self.max_obj_len: + selected_obj_pcds = selected_obj_pcds[:self.max_obj_len] + else: + if self.split == 'train': + random.shuffle(remained_obj_idx) + selected_obj_pcds.extend([ + obj_pcds[i] + for i in remained_obj_idx[:self.max_obj_len - num_selected_obj] + ]) + + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd(selected_obj_pcds, + return_anchor=True) + data_dict = self.get_prompts( + instruction=random.choice(self.instruction_pool), + situation=random.choice(self.situation_pool), + ) + data_dict.update({ + 'source': 'scannet', + 'scene_id': scan_id, + 'obj_fts': obj_fts, + 'obj_locs': obj_locs, + 'anchor_locs': anchor_loc, + 'img_fts': torch.zeros(3, 224, 224), + 'img_masks': torch.LongTensor([0]).bool(), + 'output_gt': obj_caption, + 'iou_flag': torch.LongTensor([iou_flag]).bool(), + 'corpus_key': corpus_key, + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoNr3D(LeoScan2Cap): + situation_pool = 
Leo_situation_pool + instruction_pool = Leo_objcap_instruction_pool + + def __init__(self, cfg, split): + super(LeoScan2Cap, self).__init__() + self.scannet_base = cfg.data.nr3d.scannet_base + self.num_points = cfg.data.nr3d.num_points + self.max_obj_len = cfg.data.nr3d.max_obj_len + self.max_caption_length = int(cfg.llm.max_out_len / + LLAMA_TOKEN_SENT_RATIO) + + if split == 'train': + self.split = 'train' + self.pc_type = 'gt' + else: + self.split = 'val' + self.pc_type = getattr(cfg.data.nr3d, 'pc_type', 'gt') + + self.iou_threshold = getattr(cfg.data.nr3d, 'iou_thres', 0.5) + + logger.info(f'Loading LeoNr3D {split}-set language') + self.scan_ids, self.lang_data, self.corpus_cache = self.load_anno( + cfg.data.nr3d.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading LeoNr3D {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def load_anno(self, anno_dir): + scan_ids = [] + scan_caps = [] + corpus_cache = [] + anno_file = os.path.join(anno_dir, f'nr3d_{self.split}.json') + with open(anno_file, 'r') as f: + json_data = json.load(f) + for item in json_data: + scan_id = item['scan_id'] + obj_id = int(item['target_id']) + obj_name = item['instance_type'] + key = f'{scan_id}|{obj_id}|{obj_name}' + if self.split != 'train' and key in corpus_cache: + continue + # only evaluate once per obj instance + corpus_cache.append(key) + cap = item['utterance'] + all_caps = self._split_sentence( + sentence='. '.join(cap.split('. ')[1:]), + max_length=self.max_caption_length, + prefix=cap.split('. ')[0] + '. ', + ) + for c in all_caps: + scan_ids.append(item['scan_id']) + scan_caps.append({ + 'obj_id': item['target_id'], + 'caption': c, + }) + + return scan_ids, scan_caps, corpus_cache + + +@DATASET_REGISTRY.register() +class LeoScanQA(LeoBase): + + def __init__(self, cfg, split): + super().__init__() + self.scannet_base = cfg.data.scanqa.scannet_base + self.num_points = cfg.data.scanqa.num_points + self.max_obj_len = cfg.data.scanqa.max_obj_len + + if split == 'train': + self.split = 'train' + self.pc_type = 'gt' + else: + self.split = 'val' + self.pc_type = getattr(cfg.data.scanqa, 'pc_type', 'gt') + + logger.info(f'Loading LeoScanQA {split}-set language') + self.scan_ids, self.lang_data, self.scan_insts = self.load_anno( + cfg.data.scanqa.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading LeoScanQA {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def load_anno(self, anno_dir): + scan_ids = [] + scan_qa_pairs = [] + scan_insts = [] + anno_file = os.path.join(anno_dir, f'ScanQA_v1.0_{self.split}.json') + with open(anno_file, 'r', encoding='utf-8') as f: + json_data = json.load(f) + + for item in json_data: + scan_ids.append(item['scene_id']) + scan_qa_pairs.append({ + 'q': item['question'], # str + 'a': [s.strip() for s in item['answers']], # list of str + }) + # try to parse concerned objects + insts = item['object_ids'] + scan_insts.append(insts) + + return scan_ids, scan_qa_pairs, scan_insts + + def __len__(self): + return len(self.scan_ids) + + def __getitem__(self, index): + scan_id = self.scan_ids[index] + qa_dict = self.lang_data[index] + scan_insts = self.scan_insts[index] + question = qa_dict['q'] # str + answer_list = qa_dict['a'] # list of str + + # load pcds + if scan_id not in self.scan_data: + self.scan_data[scan_id] = self.load_scannet(scan_id) + + if self.pc_type == 'gt': + obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy( + ) # Dict{int: np.ndarray (N, 6)} + 
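+            # Illustrative note (values made up): with scan_insts = [3, 7] and
+            # obj_pcds = {0: a0, 3: a3, 5: a5, 7: a7}, the two lines below give
+            # selected_obj_pcds = [a3, a7] and remained_obj_idx = [0, 5], so
+            # question-relevant instances are packed first and the remaining
+            # objects only pad the list up to self.max_obj_len.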
selected_obj_pcds = [obj_pcds[obj_id] for obj_id in scan_insts] + remained_obj_idx = [ + i for i in obj_pcds.keys() if i not in scan_insts + ] + else: + obj_pcds = self.scan_data[scan_id]['obj_pcds_pred'].copy( + ) # List[np.ndarray (N, 6)] + gt_center = [] + gt_box_size = [] + for obj_id in scan_insts: + gt_pcd = self.scan_data[scan_id]['obj_pcds'][obj_id].copy() + center, box_size = convert_pc_to_box(gt_pcd) + gt_center.append(center) + gt_box_size.append(box_size) + + # select proposals with high IoU with question-relevant gt pcds + selected_obj_pcds = [] + remained_obj_idx = [] + for i in range(len(obj_pcds)): + obj_center, obj_box_size = convert_pc_to_box(obj_pcds[i]) + proposal_selected = False + for center, box_size in zip(gt_center, gt_box_size): + if eval_ref_one_sample( + construct_bbox_corners(obj_center, obj_box_size), + construct_bbox_corners(center, box_size)) >= 0.25: + selected_obj_pcds.append(obj_pcds[i]) + proposal_selected = True + break + if not proposal_selected: + remained_obj_idx.append(i) + + num_selected_objs = len(selected_obj_pcds) + if num_selected_objs >= self.max_obj_len: + if self.split == 'train': + random.shuffle(selected_obj_pcds) + selected_obj_pcds = selected_obj_pcds[:self.max_obj_len] + else: + if self.split == 'train': + random.shuffle(selected_obj_pcds) + random.shuffle(remained_obj_idx) + selected_obj_pcds.extend([ + obj_pcds[i] for i in remained_obj_idx[:self.max_obj_len - + num_selected_objs] + ]) + + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd( + selected_obj_pcds, return_anchor=False) + + data_dict = self.get_prompts( + instruction=question, + situation='', + ) + data_dict.update({ + 'source': + 'scannet', + 'scene_id': + scan_id, + 'obj_fts': + obj_fts, + 'obj_locs': + obj_locs, + 'anchor_locs': + anchor_loc, + 'img_fts': + torch.zeros(3, 224, 224), + 'img_masks': + torch.LongTensor([0]).bool(), + 'output_gt': + random.choice(answer_list) + if self.split == 'train' else answer_list, + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoSQA3D(LeoBase): + situation_pool = Leo_situation_pool + + def __init__(self, cfg, split): + super().__init__() + self.split = split + self.scannet_base = cfg.data.sqa3d.scannet_base + self.num_points = cfg.data.sqa3d.num_points + self.max_obj_len = cfg.data.sqa3d.max_obj_len + if split == 'train': + self.pc_type = 'gt' + else: + self.pc_type = getattr(cfg.data.sqa3d, 'pc_type', 'gt') + + logger.info(f'Loading LeoSQA3D {split}-set language') + self.scan_ids, self.lang_data, self.align_matrices = self.load_anno( + cfg.data.sqa3d.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading LeoSQA3D {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def load_anno(self, anno_dir): + scan_ids = [] + sqa_annos = [] + + question_file = os.path.join( + anno_dir, f'v1_balanced_questions_{self.split}_scannetv2.json') + with open(question_file, 'r', encoding='utf-8') as f: + question_data = json.load(f)['questions'] + question_map = {} + for item in question_data: + question_map[item['question_id']] = { + 's': [item['situation']] + + item['alternative_situation'], # list of str + 'q': item['question'], # str + } + + anno_file = os.path.join( + anno_dir, + f'v1_balanced_sqa_annotations_{self.split}_scannetv2.json') + with open(anno_file, 'r', encoding='utf-8') as f: + anno_data = json.load(f)['annotations'] + for item in anno_data: + scan_ids.append(item['scene_id']) + sqa_annos.append({ + 's': 
question_map[item['question_id']]['s'], # list of str + 'q': question_map[item['question_id']]['q'], # str + 'a': + [meta['answer'] for meta in item['answers']], # list of str + 'pos': np.array(list(item['position'].values())), # array (3,) + 'rot': np.array(list(item['rotation'].values())), # array (4,) + }) + + align_matrices = torch.load(os.path.join(anno_dir, + 'axisAlignment.pth')) + + return scan_ids, sqa_annos, align_matrices + + def __len__(self): + return len(self.scan_ids) + + def convert_person_view(self, sentence): + # first-person view to second-person view + forms = { + 'i': 'you', + 'me': 'you', + 'my': 'your', + 'mine': 'yours', + 'am': 'are' + } + + def translate(word): + if word.lower() in forms: + return forms[word.lower()] + return word + + result = ' '.join( + [translate(word) for word in nltk.wordpunct_tokenize(sentence)]) + return result.capitalize() + + def align_situation(self, pos, ori, scene_center, align_matrix): + """ + We need to transform the location and orientation to align with pcd + pos: [x, y, z]; ori: [_x, _y, _z, _w] + """ + if isinstance(pos, dict): + pos = [pos['x'], pos['y'], pos['z']] + pos = np.array(pos) + + if isinstance(ori, dict): + ori = [ori['_x'], ori['_y'], ori['_z'], ori['_w']] + ori = np.array(ori) + + pos_new = pos.reshape(1, 3) @ align_matrix.T + pos_new += scene_center + pos_new = pos_new.reshape(-1) + + ori = R.from_quat(ori).as_matrix() + ori_new = align_matrix @ ori + ori_new = -ori_new # SQA3D annotation corresponds to the opposite orientation + ori_new = R.from_matrix(ori_new).as_quat() + ori_new = ori_new.reshape(-1) + return pos_new, ori_new + + def __getitem__(self, index): + scan_id = self.scan_ids[index] + sqa_dict = self.lang_data[index] + situation = sqa_dict['s'] # list of str + question = sqa_dict['q'] # str + answer_list = sqa_dict['a'] # list of str + pos = sqa_dict['pos'] # array, (3,) + rot = sqa_dict['rot'] # array, (4,) + + # load pcds + if scan_id not in self.scan_data: + self.scan_data[scan_id] = self.load_scannet(scan_id) + + # sqa3d has no annotations of question-relevant objs + if self.pc_type == 'gt': + obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy( + ) # Dict{int: np.ndarray (N, 6)} + obj_pcds = list(obj_pcds.values()) # to list + else: + obj_pcds = self.scan_data[scan_id]['obj_pcds_pred'].copy( + ) # List[np.ndarray (N, 6)] + + if self.split == 'train': + random.shuffle(obj_pcds) + selected_obj_pcds = obj_pcds[:self.max_obj_len] + + # align situation with pcd + pos_aligned, rot_aligned = self.align_situation( + pos, rot, self.scan_data[scan_id]['scene_center'], + self.align_matrices[scan_id]) + + obj_fts, obj_locs, (pos_aligned, rot_aligned) = self.preprocess_pcd( + selected_obj_pcds, + return_anchor=False, + situation=(pos_aligned, rot_aligned)) + + if self.split == 'train': + # augmentation for train + situation = random.choice(situation) + else: + # fix for eval + situation = situation[0] + + question_type = get_sqa_question_type(question) + + data_dict = self.get_prompts( + instruction=self.convert_person_view(question), + situation=random.choice(self.situation_pool) + ' ' + + self.convert_person_view(situation), + ) + data_dict.update({ + 'source': + 'scannet', + 'scene_id': + scan_id, + 'obj_fts': + obj_fts, + 'obj_locs': + obj_locs, + 'situation': + situation, + 'anchor_locs': + torch.from_numpy(pos_aligned).float(), + 'anchor_orientation': + torch.from_numpy(rot_aligned).float(), + 'img_fts': + torch.zeros(3, 224, 224), + 'img_masks': + torch.LongTensor([0]).bool(), + 'output_gt': + 
random.choice(answer_list)
+            if self.split == 'train' else answer_list,
+            'sqa_type':
+            question_type,
+        })
+
+        return self.check_output_and_fill_dummy(data_dict)
+
+
+@DATASET_REGISTRY.register()
+class Leo3RScanQA(LeoBase):
+    """JSON format of the annotation file:
+    {
+        "1776ad80-4db7-2333-8b18-f02ef42f3569": {
+            "query": "{'floor-1': {'relations': [], 'attribute': {'material': 'wooden', 'shape': 'flat', 'color': 'brown'}},}",
+            "response": [
+                {
+                    "Q": "What is the material of the floor?",
+                    "T": "floor-1",
+                    "A": ["wooden"]
+                },
+                {
+                    "Q": "What color are the walls?",
+                    "T": "wall-2, wall-3",
+                    "A": ["white"]
+                },
+            ]
+        },
+    }
+    """
+
+    def __init__(self, cfg, split):
+        super().__init__()
+        self.split = 'train' if split == 'train' else 'val'
+        self.rscan_base = cfg.data.rscan_qa.rscan_base
+        self.num_points = cfg.data.rscan_qa.num_points
+        self.max_obj_len = cfg.data.rscan_qa.max_obj_len
+
+        logger.info(f'Loading Leo3RScanQA {split}-set language')
+        self.scan_ids, self.lang_data, self.scan_insts = self.load_anno(
+            cfg.data.rscan_qa.anno_dir)
+        # scan_ids may be repetitive
+        logger.info(
+            f'Finish loading Leo3RScanQA {split}-set language, collected {len(self.scan_ids)} data'
+        )
+
+        self.scan_data = {}
+
+    def load_anno(self, anno_dir):
+        scan_ids = []
+        scan_qa_pairs = []
+        scan_insts = []
+        anno_file = os.path.join(anno_dir, f'3rscan_qa_{self.split}.json')
+        with open(anno_file, 'r') as f:
+            json_data = json.load(f)
+        for k, v in json_data.items():
+            for meta_anno in v['response']:
+                # try to parse concerned objects
+                try:
+                    insts = meta_anno['T'].split(', ')
+                    insts = [int(s.split('-')[-1]) for s in insts]
+                except Exception:
+                    # 'T' is missing or malformed; fall back to no instances
+                    insts = []
+                scan_insts.append(insts)
+                scan_ids.append(k)
+                scan_qa_pairs.append({
+                    'q': meta_anno['Q'],  # str
+                    'a': [a.strip() for a in meta_anno['A']],  # list of str
+                })
+
+        return scan_ids, scan_qa_pairs, scan_insts
+
+    def __len__(self):
+        return len(self.scan_ids)
+
+    def __getitem__(self, index):
+        scan_id = self.scan_ids[index]
+        qa_dict = self.lang_data[index]
+        scan_insts = self.scan_insts[index]
+        question = qa_dict['q']
+        answer_list = qa_dict['a']
+
+        # load pcds
+        if scan_id not in self.scan_data:
+            self.scan_data[scan_id] = self.load_rscan(scan_id)
+        obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy()
+        # Dict: { int: np.ndarray (N, 6) }
+
+        # crop objects to max_obj_len, select relevant objs first
+        selected_obj_pcds = [obj_pcds[i] for i in scan_insts]
+
+        num_selected_objs = len(selected_obj_pcds)
+        if num_selected_objs >= self.max_obj_len:
+            if self.split == 'train':
+                random.shuffle(selected_obj_pcds)
+            selected_obj_pcds = selected_obj_pcds[:self.max_obj_len]
+        else:
+            # select from remaining objs
+            remained_obj_idx = [
+                i for i in obj_pcds.keys() if i not in scan_insts
+            ]
+            if self.split == 'train':
+                random.shuffle(selected_obj_pcds)
+                random.shuffle(remained_obj_idx)
+            for i in remained_obj_idx[:self.max_obj_len - num_selected_objs]:
+                selected_obj_pcds.append(obj_pcds[i])
+
+        obj_fts, obj_locs, anchor_loc = self.preprocess_pcd(
+            selected_obj_pcds, return_anchor=False)
+        data_dict = self.get_prompts(
+            instruction=question,
+            situation='',
+        )
+        data_dict.update({
+            'source':
+            '3rscan',
+            'scene_id':
+            scan_id,
+            'obj_fts':
+            obj_fts,
+            'obj_locs':
+            obj_locs,
+            'anchor_locs':
+            anchor_loc,
+            'img_fts':
+            torch.zeros(3, 224, 224),
+            'img_masks':
+            torch.LongTensor([0]).bool(),
+            'output_gt':
+            random.choice(answer_list)
+            if self.split == 'train' else answer_list,
+        })
+
+        return self.check_output_and_fill_dummy(data_dict)
+
+
+@DATASET_REGISTRY.register()
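+# A sketch of the expected 3rscan_plan_{split}.json layout, inferred from
+# load_anno below (the field values here are made up for illustration):
+# {
+#     "<scan_id>": {
+#         "response": [
+#             {"instruction": "Tidy up the desk.",
+#              "plan": "First, walk to the desk. Then, ..."}
+#         ]
+#     }
+# }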
+class Leo3RScanPlan(LeoBase): + instruction_prefix_pool = Leo_plan_instruction_pool + + def __init__(self, cfg, split): + super().__init__() + self.split = 'train' if split == 'train' else 'val' + self.rscan_base = cfg.data.rscan_plan.rscan_base + self.num_points = cfg.data.rscan_plan.num_points + self.max_obj_len = cfg.data.rscan_plan.max_obj_len + + logger.info(f'Loading Leo3RScanPlan {split}-set language') + self.scan_ids, self.lang_data = self.load_anno( + cfg.data.rscan_plan.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading Leo3RScanPlan {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def load_anno(self, anno_dir): + scan_ids = [] + lang_data = [] + anno_file = os.path.join(anno_dir, f'3rscan_plan_{self.split}.json') + with open(anno_file, 'r') as f: + json_data = json.load(f) + for k, v in json_data.items(): + for meta_anno in v['response']: + scan_ids.append(k) + lang_data.append({ + 'goal': meta_anno['instruction'], + 'plan': meta_anno['plan'], + }) + # no split operation as we assume the response length has been processed in advance + + return scan_ids, lang_data + + def __len__(self): + return len(self.scan_ids) + + def __getitem__(self, index): + scan_id = self.scan_ids[index] + goal_plan_pair = self.lang_data[index] + goal = goal_plan_pair['goal'] + plan = goal_plan_pair['plan'] + + # load pcds + if scan_id not in self.scan_data: + self.scan_data[scan_id] = self.load_rscan(scan_id) + obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy() + # Dict: { int: np.ndarray (N, 6) } + + selected_obj_pcds = list(obj_pcds.values()) + if self.split == 'train': + random.shuffle(selected_obj_pcds) + selected_obj_pcds = selected_obj_pcds[:self.max_obj_len] + + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd( + selected_obj_pcds, return_anchor=False) + data_dict = self.get_prompts( + instruction=random.choice(self.instruction_prefix_pool) + ': ' + + goal.lower(), + situation='', + ) + data_dict.update({ + 'source': '3rscan', + 'scene_id': scan_id, + 'obj_fts': obj_fts, + 'obj_locs': obj_locs, + 'anchor_locs': anchor_loc, + 'img_fts': torch.zeros(3, 224, 224), + 'img_masks': torch.LongTensor([0]).bool(), + 'output_gt': plan, + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class Leo3RScanDialog(LeoBase): + r"""The format of json file + { + 'scan_id': { + 'query': scene graph, + 'response': dialogues, # list of list, [dialog_1, dialog_2, ...] + } + } + The format of dialog_i + [ + {'role': 'Human', 'content': 'What is the color of the sofa?'}, + {'role': 'Robot', 'content': 'The color of the sofa is red. '}, + {'role': 'Human', 'content': 'Is the sofa in good condition?'}, + {'role': 'Robot', 'content': 'No, the sofa is in an old state. '}, + ] + Dialogue for Vicuna: "USER: Who are you? ASSISTANT: I am Vicuna.USER: What can you do? 
ASSISTANT:" + """ + + def __init__(self, cfg, split): + super().__init__() + self.split = 'train' if split == 'train' else 'val' + self.rscan_base = cfg.data.rscan_dialog.rscan_base + self.num_points = cfg.data.rscan_dialog.num_points + self.max_obj_len = cfg.data.rscan_dialog.max_obj_len + + logger.info(f'Loading Leo3RScanDialog {split}-set language') + self.scan_ids, self.lang_data = self.load_anno( + cfg.data.rscan_dialog.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading Leo3RScanDialog {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def load_anno(self, anno_dir): + scan_ids = [] + lang_data = [] + anno_file = os.path.join(anno_dir, f'3rscan_dialog_{self.split}.json') + with open(anno_file, 'r') as f: + json_data = json.load(f) + for k, v in json_data.items(): + dialogs = v['response'] + for dialog in dialogs: + assert dialog[0][ + 'role'] == 'Human', 'Dialogue should start with Human' + assert len( + dialog) > 1, 'Dialogue should contain Robot responses' + history = f"USER: {dialog[0]['content']} ASSISTANT:" + scan_ids.append(k) + lang_data.append({ + 'history': history, + 'response': dialog[1]['content'].strip(), + }) + for i in range(1, len(dialog)): + meta_anno = dialog[i] + if i % 2 == 0 and i + 1 < len(dialog): + # Human + history += f"USER: {meta_anno['content']} ASSISTANT:" + scan_ids.append(k) + lang_data.append({ + 'history': + history, + 'response': + dialog[i + 1]['content'].strip(), + }) + else: + # Robot + history += f" {meta_anno['content'].strip()}" + + return scan_ids, lang_data + + def __len__(self): + return len(self.scan_ids) + + def __getitem__(self, index): + scan_id = self.scan_ids[index] + dialog_pair = self.lang_data[index] + history = dialog_pair['history'] + response = dialog_pair['response'] + + # load pcds + if scan_id not in self.scan_data: + self.scan_data[scan_id] = self.load_rscan(scan_id) + obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy() + # Dict: { int: np.ndarray (N, 6) } + + selected_obj_pcds = list(obj_pcds.values()) + if self.split == 'train': + random.shuffle(selected_obj_pcds) + selected_obj_pcds = selected_obj_pcds[:self.max_obj_len] + + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd( + selected_obj_pcds, return_anchor=False) + data_dict = self.get_prompts( + instruction=None, + dialogue=history, + situation='', + ) + data_dict.update({ + 'source': '3rscan', + 'scene_id': scan_id, + 'obj_fts': obj_fts, + 'obj_locs': obj_locs, + 'anchor_locs': anchor_loc, + 'img_fts': torch.zeros(3, 224, 224), + 'img_masks': torch.LongTensor([0]).bool(), + 'output_gt': response, + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoMP3DObjNav(LeoBase): + """base_dir. + + - mp3d_obj + - scene_id + - objects.npy + - demos + - scene_id + - demo_id + - demo.json + - images + - 0000.png + ... 
+ """ + situation_pool = Leo_situation_pool + + def __init__(self, cfg, split): + super().__init__() + self.split = 'train' if split == 'train' else 'val' + self.base_dir = cfg.data.mp3d_objnav.base_dir + self.max_obj_len = cfg.data.mp3d_objnav.max_obj_len + self.num_points = cfg.data.mp3d_objnav.num_points + self.max_traj_len = cfg.data.mp3d_objnav.max_traj_len + self.history_length = cfg.data.mp3d_objnav.history_length + self.num_pred = cfg.data.mp3d_objnav.num_pred + self.img_size = cfg.data.mp3d_objnav.img_size + self.scene_object_deterministic = cfg.data.mp3d_objnav.scene_object_deterministic + + logger.info(f'Loading LeoMP3DObjNav {split}-set demos') + self.all_data, self.num_demos = self.load_demos(self.base_dir) + logger.info( + f'Finish loading LeoMP3DObjNav {split}-set demos, collected ' + + f'{len(self.all_data)} data from {self.num_demos} demos') + self.scan_data = {} + + def load_demos(self, anno_dir): + all_data = [] + num_demos = 0 + files = glob.glob(os.path.join(anno_dir, 'demos/*/*/demo.json')) + for file in files: + with open(file, 'r') as f: + traj = json.load(f) + if self.split == 'train': + if traj['scene_id'] in self.heldout_scenes: + continue + else: + if traj['scene_id'] not in self.heldout_scenes: + continue + if len(traj['agent']) > self.max_traj_len: + continue + + scene_id = traj['scene_id'] + goal = traj['goal'] + + all_actions = [ + self.action_space[step[1]] for step in traj['agent'] + ] + for ind in range(0, len(traj['agent']), self.num_pred): + # range(0, len(traj['agent'])) for expanding datasets + cur_step = traj['agent'][ind] + + # if there is no enough steps, just predict STOP + action = _extract_between(all_actions, ind, + ind + self.num_pred - 1, + self.action_space['stop']) + history = _extract_between(all_actions, + ind - self.history_length, ind - 1, + self.action_space['stop']) + + all_data.append({ + 'scene_id': + scene_id, + 'obj_path': + os.path.join(anno_dir, 'mp3d_obj', scene_id, + 'objects.npy'), + 'img_path': + os.path.join(os.path.dirname(file), 'images', + '{:04d}.png'.format(ind)), + 'pos': + np.array(cur_step[0]['position']).astype(np.float32), + 'rot': + np.array(cur_step[0]['rotation']).astype(np.float32), + 'goal': + goal, + 'action': + np.array(action).astype(np.int32), + 'history': + np.array(history).astype(np.int32) + }) + num_demos += 1 + + return all_data, num_demos + + def __len__(self): + return len(self.all_data) + + def filter_obj_type(self, obj_list): + # obj_list: [array (num_points, 7)] * num_objs, the last column is semantic label + filtered = [] + for obj_pcd in obj_list: + sem = obj_pcd[0, -1] - 1 + if sem in shapenetcore_pp: + obj_pcd = obj_pcd[:, :6] + obj_pcd[:, 3:] = obj_pcd[:, 3:] * 2 - 1 # [0, 1] -> [-1, 1] + filtered.append(obj_pcd) + return filtered + + def __getitem__(self, index): + data = self.all_data[index] + scene_id = data['scene_id'] + goal = data['goal'] + anchor_loc = data['pos'] + anchor_orientation = data['rot'] + past_actions = data['history'] + actions = data['action'] + + # load pcds + if scene_id not in self.scan_data: + obj_list = np.load(data['obj_path'], allow_pickle=True) + self.scan_data[scene_id] = self.filter_obj_type(obj_list) + + obj_pcds = self.scan_data[scene_id].copy() + # list: [np.ndarray (N, 6)] + + # sample + num_objs = len(obj_pcds) + if self.scene_object_deterministic: + loc = random.Random(hash(scene_id)) + selected_obj_pcds = loc.sample(obj_pcds, + k=min(num_objs, self.max_obj_len)) + else: + if self.split == 'train': + selected_obj_pcds = random.sample(obj_pcds, + k=min( + 
num_objs, + self.max_obj_len)) + else: + selected_obj_pcds = obj_pcds[:self.max_obj_len] + obj_fts, obj_locs, _ = self.preprocess_pcd(selected_obj_pcds, + return_anchor=False) + + # load img, have not changed channels since imgs are saved as BGR + img_fts = preprocess_2d(cv2.imread(data['img_path']), + size=self.img_size) + + # tokenize actions + past_actions_tokenized = [] + for a in past_actions: + past_actions_tokenized.append(HABITAT_ACTION_SPACE_TOKENIZE[a]) + actions_tokenized = [] + for a in actions: + actions_tokenized.append(HABITAT_ACTION_SPACE_TOKENIZE[a]) + + data_dict = self.get_prompts( + instruction= + f'The task is navigation. Your goal is to find {goal} by moving around in the scene. ' + + f"Past actions: {''.join(past_actions_tokenized)}.", + situation=random.choice(self.situation_pool), + ) + data_dict.update({ + 'source': + 'mp3d', + 'scene_id': + scene_id, + 'obj_fts': + obj_fts, + 'obj_locs': + obj_locs, + 'anchor_locs': + torch.from_numpy(anchor_loc).float(), + 'anchor_orientation': + torch.from_numpy(anchor_orientation).float(), + 'img_fts': + torch.from_numpy(img_fts).float(), + 'img_masks': + torch.LongTensor([1]).bool(), + 'output_gt': + ''.join(actions_tokenized), + }) + + return self.check_output_and_fill_dummy(data_dict) + + @property + def action_space(self): + return HABITAT_ACTION_SPACE + + @property + def heldout_scenes(self): + return [ + '17DRP5sb8fy', + '5LpN3gDmAk7', + '82sE5b5pLXE', + 'D7N2EKCX4Sj', + 'HxpKQynjfin', + ] + + +@DATASET_REGISTRY.register() +class LeoCLIPort(LeoBase): + """base_dir. + + - demos + - task_type + - {DEMOID}-{RUNSEED}.pkl + ... + - index_train.pkl + - index_val.pkl + """ + situation_pool = Leo_situation_pool + + def __init__(self, cfg, split): + super().__init__() + self.split = 'train' if split == 'train' else 'val' + self.base_dir = cfg.data.cliport.base_dir + self.max_obj_len = cfg.data.cliport.max_obj_len + self.num_points = cfg.data.cliport.num_points + self.history_length = cfg.data.cliport.history_length + self.img_size = cfg.data.cliport.img_size + + logger.info(f'Loading LeoCLIPort {split}-set demos') + self.all_data_mapping, self.num_demos = self.load_demos(self.base_dir) + logger.info( + f'Finish loading LeoCLIPort {split}-set demos, collected ' + + f'{len(self.all_data_mapping)} data from {self.num_demos} demos') + + def load_demos(self, anno_dir): + index_file = os.path.join(anno_dir, f'index_{self.split}.pkl') + if os.path.exists(index_file): + with open(index_file, 'rb') as f: + all_data = pickle.load(f) + else: + # sweep and create index + all_data = {'mapping': [], 'num_demos': 0} + files = glob.glob(os.path.join(anno_dir, 'demos/*/*.pkl')) + for file in files: + if self.split == 'train': + if any([i in file for i in self.heldout_scenes]): + continue + else: + if all([i not in file for i in self.heldout_scenes]): + continue + with open(file, 'rb') as f: + traj = pickle.load(f) + for ind, step in enumerate(traj): + if step['act']: + all_data['mapping'].append((file, ind, False)) + else: + assert ind == len(traj) - 1 + all_data['mapping'].append((file, ind, True)) + all_data['num_demos'] += 1 + with open(index_file, 'wb') as f: + pickle.dump(all_data, f) + + return all_data['mapping'], all_data['num_demos'] + + @property + def heldout_scenes(self): + return ['{:06d}'.format(i) for i in range(100)] + + def __len__(self): + return len(self.all_data_mapping) + + @staticmethod + def _segment_to_object(obs): + pc = obs['pointcloud'] + cc = obs['colorcloud'][:, :3] / 127.5 - 1 # [0, 255] -> [-1, 1] + sc = 
obs['segmentcloud'] + unique_labels = np.unique(sc) + pc = np.concatenate([pc, cc], axis=-1) + obj_fts = [pc[sc[:, 0] == label] for label in unique_labels] + return obj_fts + + def __getitem__(self, index): + file, ind, done = self.all_data_mapping[index] + scene_id = os.path.sep.join(file.split(os.path.sep)[-2:]) + with open(file, 'rb') as f: + traj = pickle.load(f) + step = traj[ind] + + # obs + obs = step['obs'] + obj_pcds = self._segment_to_object(obs) + if self.split == 'train': + random.shuffle(obj_pcds) + obj_pcds = obj_pcds[:self.max_obj_len] + # so far, the maximum number of objects in cliport is 52 + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd( + obj_pcds, return_anchor=False, rot_aug=False) + + img_fts = preprocess_2d(obs['colormap'], size=self.img_size) + + # action + all_actions = [s['act'] for s in traj] + + past_actions = _extract_between(all_actions, ind - self.history_length, + ind - 1, _DUMMY_CLIPORT_ACTION) + past_actions_tokenized = [] + for a in past_actions: + for k in ['pose0', 'pose1']: + past_actions_tokenized.extend( + CLIPORT_ACTION_SPACE_TOKENIZE(a[k])) + + if done: + # the last-step goal is "done ...", we need to make it the same as other steps + goal = traj[-2]['info']['lang_goal'] + action = _DUMMY_CLIPORT_ACTION + else: + goal = step['info']['lang_goal'] + action = all_actions[ind] + + action_tokenized = [] + for k in ['pose0', 'pose1']: + action_tokenized.extend(CLIPORT_ACTION_SPACE_TOKENIZE(action[k])) + + data_dict = self.get_prompts( + instruction=f'The task is manipulation. Your goal is to {goal}. ' + + f"Past actions: {''.join(past_actions_tokenized)}.", + situation=random.choice(self.situation_pool), + ) + data_dict.update({ + 'source': 'cliport', + 'scene_id': scene_id, + 'obj_fts': obj_fts, + 'obj_locs': obj_locs, + 'anchor_locs': anchor_loc, + 'img_fts': torch.from_numpy(img_fts).float(), + 'img_masks': torch.LongTensor([1]).bool(), + 'output_gt': ''.join(action_tokenized), + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoSceneCap3DLLM(LeoBase): + + def __init__(self, cfg, split): + super().__init__() + self.scannet_base = cfg.data.scene_cap_3dllm.scannet_base + self.num_points = cfg.data.scene_cap_3dllm.num_points + self.max_obj_len = cfg.data.scene_cap_3dllm.max_obj_len + self.max_caption_length = int(cfg.llm.max_out_len / + LLAMA_TOKEN_SENT_RATIO) + + if split == 'train': + self.split = 'train' + self.pc_type = 'gt' + else: + self.split = 'val' + self.pc_type = getattr(cfg.data.scene_cap_3dllm, 'pc_type', 'gt') + + logger.info(f'Loading LeoSceneCap3DLLM {split}-set language') + self.scan_ids, self.lang_data = self.load_anno( + cfg.data.scene_cap_3dllm.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading LeoSceneCap3DLLM {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def load_anno(self, anno_dir): + scan_ids = [] + scan_caps = [] + anno_file = os.path.join( + anno_dir, f'3d_llm_scene_description_{self.split}.json') + with open(anno_file, 'r') as f: + json_data = json.load(f) + for item in json_data: + cap = item['answers'][0] + all_caps = self._split_sentence( + sentence='. '.join(cap.split('. ')[1:]), + max_length=self.max_caption_length, + prefix=cap.split('. ')[0] + '. 
', + ) + for c in all_caps: + scan_caps.append(c) + scan_ids.append(item['scene_id']) + + return scan_ids, scan_caps + + def __len__(self): + return len(self.scan_ids) + + def __getitem__(self, index): + scan_id = self.scan_ids[index] + caption = self.lang_data[index] + + # load pcds + if scan_id not in self.scan_data: + self.scan_data[scan_id] = self.load_scannet(scan_id) + + if self.pc_type == 'gt': + obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy( + ) # Dict{int: np.ndarray (N, 6)} + selected_obj_pcds = list(obj_pcds.values()) + else: + # only for evaluation with object proposals + obj_pcds = self.scan_data[scan_id]['obj_pcds_pred'].copy( + ) # List[np.ndarray (N, 6)] + selected_obj_pcds = obj_pcds + + if self.split == 'train': + random.shuffle(selected_obj_pcds) + selected_obj_pcds = selected_obj_pcds[:self.max_obj_len] + + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd( + selected_obj_pcds, return_anchor=False) + data_dict = self.get_prompts( + instruction='Describe the room.', + situation='', + ) + data_dict.update({ + 'source': 'scannet', + 'scene_id': scan_id, + 'obj_fts': obj_fts, + 'obj_locs': obj_locs, + 'anchor_locs': anchor_loc, + 'img_fts': torch.zeros(3, 224, 224), + 'img_masks': torch.LongTensor([0]).bool(), + 'output_gt': caption, + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoQA3DLLM(LeoBase): + + def __init__(self, cfg, split): + super().__init__() + self.scannet_base = cfg.data.qa_3dllm.scannet_base + self.num_points = cfg.data.qa_3dllm.num_points + self.max_obj_len = cfg.data.qa_3dllm.max_obj_len + + if split == 'train': + self.split = 'train' + self.pc_type = 'gt' + else: + self.split = 'val' + self.pc_type = getattr(cfg.data.qa_3dllm, 'pc_type', 'gt') + + logger.info(f'Loading LeoQA3DLLM {split}-set language') + self.scan_ids, self.lang_data = self.load_anno( + cfg.data.qa_3dllm.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading LeoQA3DLLM {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def load_anno(self, anno_dir): + scan_ids = [] + lang_data = [] + anno_file = os.path.join( + anno_dir, f'3d_llm_embodied_question_answer_{self.split}.json') + with open(anno_file, 'r') as f: + json_data = json.load(f) + for item in json_data: + scan_ids.append(item['scene_id']) + lang_data.append({ + 'question': + item['question'].lstrip('### human: ').rstrip( + ' ### assistant:'), + 'answers': + item['answers'], + }) + + return scan_ids, lang_data + + def __len__(self): + return len(self.scan_ids) + + def __getitem__(self, index): + scan_id = self.scan_ids[index] + qa_dict = self.lang_data[index] + question = qa_dict['question'] + answer_list = qa_dict['answers'] + + # load pcds + if scan_id not in self.scan_data: + self.scan_data[scan_id] = self.load_scannet(scan_id) + + if self.pc_type == 'gt': + obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy( + ) # Dict{int: np.ndarray (N, 6)} + selected_obj_pcds = list(obj_pcds.values()) + else: + # only for evaluation with object proposals + obj_pcds = self.scan_data[scan_id]['obj_pcds_pred'].copy( + ) # List[np.ndarray (N, 6)] + selected_obj_pcds = obj_pcds + + if self.split == 'train': + random.shuffle(selected_obj_pcds) + selected_obj_pcds = selected_obj_pcds[:self.max_obj_len] + + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd( + selected_obj_pcds, return_anchor=False) + data_dict = self.get_prompts( + instruction=question, + situation='', + ) + data_dict.update({ + 'source': + 'scannet', + 
'scene_id': + scan_id, + 'obj_fts': + obj_fts, + 'obj_locs': + obj_locs, + 'anchor_locs': + anchor_loc, + 'img_fts': + torch.zeros(3, 224, 224), + 'img_masks': + torch.LongTensor([0]).bool(), + 'output_gt': + answer_list[0] if self.split == 'train' else answer_list, + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoPlan3DLLM(LeoBase): + + def __init__(self, cfg, split): + super().__init__() + self.scannet_base = cfg.data.plan_3dllm.scannet_base + self.num_points = cfg.data.plan_3dllm.num_points + self.max_obj_len = cfg.data.plan_3dllm.max_obj_len + + if split == 'train': + self.split = 'train' + self.pc_type = 'gt' + else: + self.split = 'val' + self.pc_type = getattr(cfg.data.plan_3dllm, 'pc_type', 'gt') + + logger.info(f'Loading LeoPlan3DLLM {split}-set language') + self.scan_ids, self.lang_data = self.load_anno( + cfg.data.plan_3dllm.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading LeoPlan3DLLM {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def load_anno(self, anno_dir): + scan_ids = [] + lang_data = [] + anno_file = os.path.join( + anno_dir, f'3d_llm_embodied_planning_filtered_{self.split}.json') + with open(anno_file, 'r') as f: + json_data = json.load(f) + for item in json_data: + scan_ids.append(item['scene_id']) + lang_data.append({ + 'goal': item['question'].lstrip('### human: '), + 'plan': item['answers'][0], + }) + + return scan_ids, lang_data + + def __len__(self): + return len(self.scan_ids) + + def __getitem__(self, index): + scan_id = self.scan_ids[index] + goal_plan_pair = self.lang_data[index] + goal = goal_plan_pair['goal'] + plan = goal_plan_pair['plan'] + + # load pcds + if scan_id not in self.scan_data: + self.scan_data[scan_id] = self.load_scannet(scan_id) + + if self.pc_type == 'gt': + obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy( + ) # Dict{int: np.ndarray (N, 6)} + selected_obj_pcds = list(obj_pcds.values()) + else: + # only for evaluation with object proposals + obj_pcds = self.scan_data[scan_id]['obj_pcds_pred'].copy( + ) # List[np.ndarray (N, 6)] + selected_obj_pcds = obj_pcds + + if self.split == 'train': + random.shuffle(selected_obj_pcds) + selected_obj_pcds = selected_obj_pcds[:self.max_obj_len] + + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd( + selected_obj_pcds, return_anchor=False) + + data_dict = self.get_prompts( + instruction=goal, + situation='', + ) + data_dict.update({ + 'source': 'scannet', + 'scene_id': scan_id, + 'obj_fts': obj_fts, + 'obj_locs': obj_locs, + 'anchor_locs': anchor_loc, + 'img_fts': torch.zeros(3, 224, 224), + 'img_masks': torch.LongTensor([0]).bool(), + 'output_gt': plan, + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoDialog3DLLM(LeoBase): + + def __init__(self, cfg, split): + super().__init__() + self.scannet_base = cfg.data.dialog_3dllm.scannet_base + self.num_points = cfg.data.dialog_3dllm.num_points + self.max_obj_len = cfg.data.dialog_3dllm.max_obj_len + + if split == 'train': + self.split = 'train' + self.pc_type = 'gt' + else: + self.split = 'val' + self.pc_type = getattr(cfg.data.dialog_3dllm, 'pc_type', 'gt') + + logger.info(f'Loading LeoDialog3DLLM {split}-set language') + self.scan_ids, self.lang_data = self.load_anno( + cfg.data.dialog_3dllm.anno_dir) + # scan_ids may be repeatitive + logger.info( + f'Finish loading LeoDialog3DLLM {split}-set language, collected {len(self.scan_ids)} data' + ) + + self.scan_data = {} + + def 
load_anno(self, anno_dir):
+        scan_ids = []
+        lang_data = []
+        anno_file = os.path.join(
+            anno_dir, f'3d_llm_embodied_dialogue_filtered_{self.split}.json')
+        with open(anno_file, 'r') as f:
+            json_data = json.load(f)
+        for item in json_data:
+            scan_ids.append(item['scene_id'])
+            history = item['question']
+            history = history.replace('### human:', 'USER:')
+            history = history.replace('### assistant:', 'ASSISTANT:')
+            history = history.lstrip()  # lstrip('') was a no-op
+            lang_data.append({
+                'history': history,
+                'response': item['answers'][0],
+            })
+
+        return scan_ids, lang_data
+
+    def __len__(self):
+        return len(self.scan_ids)
+
+    def __getitem__(self, index):
+        scan_id = self.scan_ids[index]
+        dialog_pair = self.lang_data[index]
+        history = dialog_pair['history']
+        response = dialog_pair['response']
+
+        # load pcds
+        if scan_id not in self.scan_data:
+            self.scan_data[scan_id] = self.load_scannet(scan_id)
+
+        if self.pc_type == 'gt':
+            obj_pcds = self.scan_data[scan_id]['obj_pcds'].copy(
+            )  # Dict{int: np.ndarray (N, 6)}
+            selected_obj_pcds = list(obj_pcds.values())
+        else:
+            # only for evaluation with object proposals
+            obj_pcds = self.scan_data[scan_id]['obj_pcds_pred'].copy(
+            )  # List[np.ndarray (N, 6)]
+            selected_obj_pcds = obj_pcds
+
+        if self.split == 'train':
+            random.shuffle(selected_obj_pcds)
+        selected_obj_pcds = selected_obj_pcds[:self.max_obj_len]
+
+        obj_fts, obj_locs, anchor_loc = self.preprocess_pcd(
+            selected_obj_pcds, return_anchor=False)
+
+        data_dict = self.get_prompts(
+            instruction=None,
+            dialogue=history,
+            situation='',
+        )
+        data_dict.update({
+            'source': 'scannet',
+            'scene_id': scan_id,
+            'obj_fts': obj_fts,
+            'obj_locs': obj_locs,
+            'anchor_locs': anchor_loc,
+            'img_fts': torch.zeros(3, 224, 224),
+            'img_masks': torch.LongTensor([0]).bool(),
+            'output_gt': response,
+        })
+
+        return self.check_output_and_fill_dummy(data_dict)
+
+
+# note: 'from glob import glob' would shadow the module object that
+# LeoMP3DObjNav and LeoCLIPort above rely on via glob.glob(...)
+import glob
+
+
+def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor:
+    """Return the rotation matrices for one of the rotations about an axis of
+    which Euler angles describe, for each value of the angle given.
+
+    Args:
+        axis: Axis label "X", "Y", or "Z".
+        angle: Euler angles in radians as a tensor of any shape.
+
+    Returns:
+        Rotation matrices as tensor of shape (..., 3, 3).
+    """
+
+    cos = torch.cos(angle)
+    sin = torch.sin(angle)
+    one = torch.ones_like(angle)
+    zero = torch.zeros_like(angle)
+
+    if axis == 'X':
+        R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos)
+    elif axis == 'Y':
+        R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos)
+    elif axis == 'Z':
+        R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one)
+    else:
+        raise ValueError('letter must be either X, Y or Z.')
+
+    return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3))
+
+
+def euler_angles_to_matrix(euler_angles: torch.Tensor,
+                           convention: str) -> torch.Tensor:
+    """Convert rotations given as Euler angles in radians to rotation matrices.
+
+    Args:
+        euler_angles: Euler angles in radians as tensor of shape (..., 3).
+        convention: Convention string of three uppercase letters from
+            {"X", "Y", and "Z"}.
+
+    Returns:
+        Rotation matrices as tensor of shape (..., 3, 3).
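+
+    Example (illustrative):
+        >>> m = euler_angles_to_matrix(torch.zeros(3), 'ZXY')
+        >>> torch.allclose(m, torch.eye(3))
+        True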
+ """ + if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3: + raise ValueError('Invalid input euler angles.') + if len(convention) != 3: + raise ValueError('Convention must have 3 letters.') + if convention[1] in (convention[0], convention[2]): + raise ValueError(f'Invalid convention {convention}.') + for letter in convention: + if letter not in ('X', 'Y', 'Z'): + raise ValueError(f'Invalid letter {letter} in convention string.') + matrices = [ + _axis_angle_rotation(c, e) + for c, e in zip(convention, torch.unbind(euler_angles, -1)) + ] + # return functools.reduce(torch.matmul, matrices) + return torch.matmul(torch.matmul(matrices[0], matrices[1]), matrices[2]) + + +def euler_to_matrix_np(euler): + # euler: N*3 np array + euler_tensor = torch.tensor(euler) + matrix_tensor = euler_angles_to_matrix(euler_tensor, 'ZXY') + return matrix_tensor.numpy() + + +def is_inside_box(points, center, size, rotation_mat): + """Check if points are inside a 3D bounding box. + + Args: + points: 3D points, numpy array of shape (n, 3). + center: center of the box, numpy array of shape (3, ). + size: size of the box, numpy array of shape (3, ). + rotation_mat: rotation matrix of the box, numpy array of shape (3, 3). + Returns: + Boolean array of shape (n, ) indicating if each point is inside the box. + """ + assert points.shape[1] == 3, 'points should be of shape (n, 3)' + center = np.array(center) # n, 3 + size = np.array(size) # n, 3 + rotation_mat = np.array(rotation_mat) + assert rotation_mat.shape == ( + 3, 3), f'R should be shape (3,3), but got {rotation_mat.shape}' + # pcd_local = (rotation_mat.T @ (points - center).T).T The expressions are equivalent + pcd_local = (points - center) @ rotation_mat # n, 3 + pcd_local = pcd_local / size * 2.0 # scale to [-1, 1] # n, 3 + pcd_local = abs(pcd_local) + return (pcd_local[:, 0] <= 1) & (pcd_local[:, 1] <= 1) & (pcd_local[:, 2] + <= 1) + + +from mmscan import MMScan + + +def pcd_color_transformer(pcd): + """_ Transform the color of the point cloud to [-1, 1]""" + pcd[:, 3:6] = pcd[:, 3:6] * 2 - 1 + return pcd + + +@DATASET_REGISTRY.register() +class LeoEmbodiedScanL(LeoBase): + situation_pool = Leo_situation_pool + instruction_pool = Leo_objcap_instruction_pool + + def __init__(self, cfg, split): + super().__init__() + + self.num_points = cfg.data.embodied_scan_l.num_points + self.max_obj_len = cfg.data.embodied_scan_l.max_obj_len + + self.cfg = cfg + if split == 'train': + self.split = 'train' + self.pc_type = 'gt' + + else: + self.split = 'val' + self.pc_type = 'gt' + + self.MMScan_loader = MMScan(version='v1', + split=split, + ratio=1.0 if split == 'train' else 0.1, + task='MMScan-QA') + + def __len__(self): + return len(self.MMScan_loader) + + def __getitem__(self, index): + + return self.parse_dict(self.MMScan_loader[index]) + + def parse_dict(self, data_dict): + scan_id = data_dict['scan_id'] + ID = data_dict['ID'] + obj_id = data_dict['input_bboxes_id'] + + if self.split == 'train': + obj_caption = data_dict['answers'][0] + else: + obj_caption = data_dict['answers'] + + input_bboxes = data_dict['input_bboxes'] + + # may this cause a bug, now it's all ok + obj_pcds = {} + for object_id in data_dict['obj_pcds'].keys(): + obj_pcds[object_id] = pcd_color_transformer( + data_dict['obj_pcds'][object_id]) + + scan_pcds = data_dict['pcds'] + + iou_flag = 1 + + if obj_id is not None and len(obj_id) > 0 and scan_pcds.shape[0] > 0: + all_obj_mask = [] + + for input_bbox in input_bboxes: + bbox = np.array(input_bbox) + orientation = np.array( + 
euler_to_matrix_np(bbox[np.newaxis, 6:])[0]) + position = np.array(bbox[:3]) + size = np.array(bbox[3:6]) + all_obj_mask.append( + torch.tensor(is_inside_box(scan_pcds[:, :3], position, + size, orientation), + dtype=bool)) + query_instance_mask = torch.stack(all_obj_mask) + query_instance_mask = torch.any(query_instance_mask, dim=0) + if scan_pcds[query_instance_mask].shape[0] > 0: + selected_obj_pcds = [scan_pcds[query_instance_mask]] + else: + # no match + selected_obj_pcds = [] + + else: + selected_obj_pcds = [] + remained_obj_idx = [i for i in obj_pcds.keys()] + + num_selected_obj = len(selected_obj_pcds) + if num_selected_obj >= self.max_obj_len: + selected_obj_pcds = selected_obj_pcds[:self.max_obj_len] + else: + if self.split == 'train': + random.shuffle(remained_obj_idx) + selected_obj_pcds.extend([ + obj_pcds[i] + for i in remained_obj_idx[:self.max_obj_len - num_selected_obj] + ]) + # all point clouds if there's not box in the es-anno + if len(selected_obj_pcds) == 0: + selected_obj_pcds = [scan_pcds] + obj_fts, obj_locs, anchor_loc = self.preprocess_pcd( + selected_obj_pcds, return_anchor=obj_id is not None) + + data_dict = self.get_prompts( + instruction=random.choice(self.instruction_pool), + situation=random.choice(self.situation_pool) + if obj_id is not None else None, + dialogue=data_dict['question']) + + # 2D images set to zeros + data_dict.update({ + 'source': ID, + 'scene_id': scan_id, + 'question_id': ID, + 'obj_fts': obj_fts, + 'obj_locs': obj_locs, + 'anchor_locs': anchor_loc, + 'img_fts': torch.zeros(3, 224, 224), + 'img_masks': torch.LongTensor([0]).bool(), + 'output_gt': obj_caption, + 'iou_flag': torch.LongTensor([iou_flag]).bool(), + }) + + return self.check_output_and_fill_dummy(data_dict) + + +@DATASET_REGISTRY.register() +class LeoMix(Dataset): + mapping = { + 'cap3d': LeoCap3D, + 'obj_scene_cap': LeoObjSceneCap, + 'scene_cap': LeoSceneCap, + 'scan2cap': LeoScan2Cap, + 'nr3d': LeoNr3D, + 'scanqa': LeoScanQA, + 'sqa3d': LeoSQA3D, + 'rscan_qa': Leo3RScanQA, + 'rscan_plan': Leo3RScanPlan, + 'rscan_dialog': Leo3RScanDialog, + 'mp3d_objnav': LeoMP3DObjNav, + 'cliport': LeoCLIPort, + 'scene_cap_3dllm': LeoSceneCap3DLLM, + 'qa_3dllm': LeoQA3DLLM, + 'plan_3dllm': LeoPlan3DLLM, + 'dialog_3dllm': LeoDialog3DLLM, + 'embodied_scan_l': LeoEmbodiedScanL, + } + + def __init__(self, cfg, split): + self.datasets = [] + self.ratio = cfg.task.leomix.ratio + logger.info(f'LeoMix about to load: {cfg.task.leomix.mix}') + for dataset in cfg.task.leomix.mix: + self.datasets.append(self.mapping[dataset](cfg, split)) + + if type(self.ratio) == int or type(self.ratio) == float: + self.index_range = list( + np.cumsum([int(len(d) * self.ratio) for d in self.datasets])) + else: + self.index_range = list( + np.cumsum([ + int(len(d) * self.ratio[i]) + for i, d in enumerate(self.datasets) + ])) + self.index_range = [0] + self.index_range + logger.info(f'Indices of LeoMix datasets: {self.index_range}') + + def __len__(self): + return self.index_range[-1] + + @staticmethod + def streamline_output(data_dict): + new_data_dict = {} + for key in LEOMIX_REQUIRED_KEYS: + if key not in data_dict: + raise ValueError(f'Key {key} is missing in LeoMix data_dict') + else: + new_data_dict[key] = data_dict[key] + return new_data_dict + + def __getitem__(self, index): + for i in range(len(self.index_range) - 1): + if self.index_range[i] <= index < self.index_range[i + 1]: + data_dict = self.datasets[i][index - self.index_range[i]] + break + + return self.streamline_output(data_dict) + + +if __name__ == 
'__main__': + loader = LeoEmbodiedScanL(None, 'train') + print(loader[100]) diff --git a/models/LEO/data/eai.py b/models/LEO/data/eai.py new file mode 100644 index 0000000..b2b595f --- /dev/null +++ b/models/LEO/data/eai.py @@ -0,0 +1,323 @@ +import csv +import os + +import numpy as np +from data.data_utils import VICUNA_ACTION_TOKENS +from transforms3d import euler + +HABITAT_ACTION_SPACE = { + 'stop': 0, + 'move_forward': 1, + 'move_backward': 2, + 'turn_left': 3, + 'turn_right': 4, + 'look_up': 5, + 'look_down': 6, + 'grab_release': 7 +} + +HABITAT_ACTION_SPACE_REVERSE = {v: k for k, v in HABITAT_ACTION_SPACE.items()} + +HABITAT_ACTION_SPACE_TOKENIZE = { + k: v + for k, v in zip( + list(HABITAT_ACTION_SPACE.values()), + list(VICUNA_ACTION_TOKENS.keys())[:len(HABITAT_ACTION_SPACE)]) +} + +HABITAT_ACTION_SPACE_DETOKENIZE = { + v: k + for k, v in zip( + list(HABITAT_ACTION_SPACE.values()), + list(VICUNA_ACTION_TOKENS.keys())[:len(HABITAT_ACTION_SPACE)]) +} + +shapenetcore_pp = [ + 1, 6, 9, 11, 12, 14, 18, 23, 24, 25, 28, 31, 33, 35, 37, 41, 46, 48, 50, + 54, 56, 61, 65, 68, 72, 77, 80, 81, 82, 84, 87, 88, 93, 94, 96, 102, 107, + 112, 114, 115, 118, 125, 127, 130, 133, 141, 145, 150, 151, 159, 173, 175, + 194, 200, 206, 207, 208, 214, 215, 216, 226, 227, 229, 236, 238, 243, 246, + 247, 250, 259, 264, 266, 267, 269, 274, 279, 281, 288, 292, 294, 301, 306, + 308, 313, 322, 327, 331, 339, 358, 361, 366, 368, 387, 394, 396, 401, 402, + 404, 406, 407, 408, 419, 426, 442, 446, 452, 459, 461, 471, 473, 476, 483, + 487, 488, 490, 499, 501, 518, 529, 533, 540, 543, 554, 571, 576, 579, 580, + 589, 602, 608, 618, 621, 635, 639, 645, 660, 676, 677, 681, 683, 686, 694, + 697, 699, 709, 710, 713, 716, 728, 731, 738, 751, 755, 763, 768, 770, 783, + 795, 800, 804, 814, 815, 834, 838, 841, 844, 845, 851, 855, 856, 857, 863, + 870, 872, 874, 880, 888, 902, 915, 919, 931, 932, 943, 944, 946, 953, 956, + 967, 975, 983, 984, 985, 989, 990, 997, 1004, 1006, 1007, 1016, 1017, 1021, + 1038, 1062, 1066, 1074, 1080, 1093, 1102, 1115, 1119, 1121, 1122, 1124, + 1125, 1127, 1130, 1133, 1138, 1141, 1145, 1146, 1152, 1153, 1163, 1173, + 1184, 1192, 1193, 1194, 1198, 1208, 1211, 1214, 1226, 1238, 1246, 1247, + 1255, 1257, 1258, 1260, 1274, 1285, 1294, 1307, 1321, 1329, 1332, 1348, + 1350, 1351, 1376, 1378, 1398, 1400, 1403, 1408, 1409, 1415, 1417, 1423, + 1429, 1445, 1451, 1463, 1472, 1476, 1478, 1479, 1487, 1497, 1508, 1509, + 1510, 1518, 1528, 1537, 1540, 1543, 1549, 1551, 1563, 1565, 1569, 1582, + 1593, 1603, 1618, 1621, 1646, 1652, 1656 +] + + +class CLIPortTokenizer: + _BOUNDS = np.array([[0.25, 0.75], [-0.5, 0.5], [0, 0.28]]) + _PIXEL_SIZE = 0.003125 + _RESOLUTIN = np.array([320, 160]) + _ZBINS = 36 + _ZROT_EPSILLON = 0. 
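+
+    # Worked example (illustrative numbers, not from the original code):
+    # tokenizing a pick pose at x=0.5, y=0.0 with yaw ~ pi/2 gives
+    #   u = round((0.0 - (-0.5)) / 0.003125) = 160   (row index, from y)
+    #   v = round((0.5 - 0.25) / 0.003125)  = 80     (column index, from x)
+    #   z_rot = int((pi/2) / (2*pi/36))     = 9      (10-degree yaw bins)
+    # so tokenize() returns the discrete triple (160, 80, 9).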
+ + def __init__(self) -> None: + self.num_tokens_u = self._RESOLUTIN[0] + self.num_tokens_v = self._RESOLUTIN[1] + self.num_tokens_z = self._ZBINS + + def tokenize(self, act_pose: tuple) -> tuple: + """Convert the action to a token.""" + act_trans = act_pose[0] + act_quat = act_pose[1] + ## limit the act_trans to the bounds + act_trans[0] = min(max(act_trans[0], self._BOUNDS[0, 0]), + self._BOUNDS[0, 1]) + act_trans[1] = min(max(act_trans[1], self._BOUNDS[1, 0]), + self._BOUNDS[1, 1]) + + u = int( + np.round((act_trans[1] - self._BOUNDS[1, 0]) / self._PIXEL_SIZE)) + v = int( + np.round((act_trans[0] - self._BOUNDS[0, 0]) / self._PIXEL_SIZE)) + ## set u, v to bound if out of bound + u = min(max(u, 0), self._RESOLUTIN[0] - 1) + v = min(max(v, 0), self._RESOLUTIN[1] - 1) + ## quat to eulerXYZ + quaternion_wxyz = np.array( + [act_quat[3], act_quat[0], act_quat[1], act_quat[2]]) + euler_zxy = euler.quat2euler(quaternion_wxyz, axes='szxy') + z_rot_cont = euler_zxy[0] + ## convert z_rot_cont to -pi ~ pi + # z_rot_cont = z_rot_cont % (2 * np.pi) - np.pi + # z_rot = int((z_rot_cont + self._ZROT_EPSILLON) / (2 * np.pi / self._ZBINS)) + (self._ZBINS // 2) - 1 + ## convert z_rot_cont to 0 ~ 2pi + z_rot_cont = z_rot_cont % (2 * np.pi) + z_rot = int( + (z_rot_cont + self._ZROT_EPSILLON) / (2 * np.pi / self._ZBINS)) + + ## convert to token id + act_token = (u, v, z_rot) + return act_token + + def detokenize(self, act_token: tuple, hmap=None) -> tuple: + """Recover the action from the token.""" + u, v, z_rot = act_token + if hmap is None: + hmap = -np.ones((self._RESOLUTIN[0], self._RESOLUTIN[1])) + x = self._BOUNDS[0, 0] + v * self._PIXEL_SIZE + y = self._BOUNDS[1, 0] + u * self._PIXEL_SIZE + z = self._BOUNDS[2, 0] + hmap[u, v] + xyz = np.array([x, y, z]) + + z_rot = z_rot - (self._ZBINS // 2) + z_rot_cont = z_rot * (2 * np.pi / self._ZBINS) + z_rot_cont = z_rot_cont + 0.5 * (2 * np.pi / self._ZBINS) + z_rot_cont = z_rot_cont % (2 * np.pi) - np.pi + quaternion_wxyz = euler.euler2quat(*(z_rot_cont, 0., 0.), axes='szxy') + quaternion_xyzw = np.array([ + quaternion_wxyz[1], quaternion_wxyz[2], quaternion_wxyz[3], + quaternion_wxyz[0] + ]) + + return (xyz, quaternion_xyzw) + + +_cliport_tokenizer = CLIPortTokenizer() + +_CLIPORT_ACTION_SPACE_U = { + k: v + for k, v in zip( + range(_cliport_tokenizer.num_tokens_u), + list(VICUNA_ACTION_TOKENS.keys()) + [len(HABITAT_ACTION_SPACE):len(HABITAT_ACTION_SPACE) + + _cliport_tokenizer.num_tokens_u]) +} + +_CLIPORT_ACTION_SPACE_U_REVERSE = { + v: k + for k, v in zip( + range(_cliport_tokenizer.num_tokens_u), + list(VICUNA_ACTION_TOKENS.keys()) + [len(HABITAT_ACTION_SPACE):len(HABITAT_ACTION_SPACE) + + _cliport_tokenizer.num_tokens_u]) +} + +_CLIPORT_ACTION_SPACE_V = { + k: v + for k, v in zip( + range(_cliport_tokenizer.num_tokens_v), + list(VICUNA_ACTION_TOKENS.keys()) + [len(HABITAT_ACTION_SPACE) + + _cliport_tokenizer.num_tokens_u:len(HABITAT_ACTION_SPACE) + + _cliport_tokenizer.num_tokens_u + _cliport_tokenizer.num_tokens_v]) +} + +_CLIPORT_ACTION_SPACE_V_REVERSE = { + v: k + for k, v in zip( + range(_cliport_tokenizer.num_tokens_v), + list(VICUNA_ACTION_TOKENS.keys()) + [len(HABITAT_ACTION_SPACE) + + _cliport_tokenizer.num_tokens_u:len(HABITAT_ACTION_SPACE) + + _cliport_tokenizer.num_tokens_u + _cliport_tokenizer.num_tokens_v]) +} + +_CLIPORT_ACTION_SPACE_ZROT = { + k: v + for k, v in zip( + range(_cliport_tokenizer.num_tokens_z), + list(VICUNA_ACTION_TOKENS.keys()) + [len(HABITAT_ACTION_SPACE) + _cliport_tokenizer.num_tokens_u + + 
_cliport_tokenizer.num_tokens_v:len(HABITAT_ACTION_SPACE) + + _cliport_tokenizer.num_tokens_u + _cliport_tokenizer.num_tokens_v + + _cliport_tokenizer.num_tokens_z]) +} + +_CLIPORT_ACTION_SPACE_ZROT_REVERSE = { + v: k + for k, v in zip( + range(_cliport_tokenizer.num_tokens_z), + list(VICUNA_ACTION_TOKENS.keys()) + [len(HABITAT_ACTION_SPACE) + _cliport_tokenizer.num_tokens_u + + _cliport_tokenizer.num_tokens_v:len(HABITAT_ACTION_SPACE) + + _cliport_tokenizer.num_tokens_u + _cliport_tokenizer.num_tokens_v + + _cliport_tokenizer.num_tokens_z]) +} + +_DUMMY_CLIPORT_ACTION = { + 'pose0': (_cliport_tokenizer._BOUNDS[:, 0], np.array([0., 0., 0., 1.])), + 'pose1': (_cliport_tokenizer._BOUNDS[:, 0], np.array([0., 0., 0., 1.])) +} + + +def CLIPORT_ACTION_SPACE_TOKENIZE(action): + global _cliport_tokenizer + action_tokens = list(_cliport_tokenizer.tokenize(action)) + action_tokens[0] = _CLIPORT_ACTION_SPACE_U[action_tokens[0]] + action_tokens[1] = _CLIPORT_ACTION_SPACE_V[action_tokens[1]] + action_tokens[2] = _CLIPORT_ACTION_SPACE_ZROT[action_tokens[2]] + return action_tokens + + +def CLIPORT_ACTION_SPACE_DETOKENIZE(token, obs=None): + global _cliport_tokenizer + u = _CLIPORT_ACTION_SPACE_U_REVERSE[token[0]] + v = _CLIPORT_ACTION_SPACE_V_REVERSE[token[1]] + z_rot = _CLIPORT_ACTION_SPACE_ZROT_REVERSE[token[2]] + hmap = obs['depthmap'] if obs is not None else None + return _cliport_tokenizer.detokenize((u, v, z_rot), hmap) + + +def _extract_between(lst, start, end, padding): + # start and end are both included. + + # Calculate the desired output length + length = end - start + 1 + + # Pad the list at the beginning and end + lst = [padding] * max(0, -start) + lst + [padding] * max( + 0, end - len(lst) + 1) + + # Adjust the start and end based on the padding + start += max(0, -start) + end = start + length - 1 + + return lst[start:end + 1] + + +def filter_object_type(objects): + obj_list = [] + for obj_pcd in objects: + # TODO(jxma): we assume the last column is the semantic label + sem = obj_pcd[0, -1] - 1 + if sem in shapenetcore_pp: + obj_list.append(obj_pcd) + return obj_list + + +def read_label_mapping(filename, label_from='category', label_to='index'): + # mapping start from 0 + assert os.path.isfile(filename) + mapping = dict() + rmapping = dict() + with open(filename) as tsvfile: + tsvfile_content = tsvfile.read().replace(' ', '\t') + reader = csv.DictReader(tsvfile_content.splitlines(), delimiter='\t') + for row in reader: + mapping[row[label_from]] = int(row[label_to]) - 1 + rmapping[int(row[label_to]) - 1] = row[label_from] + return mapping, rmapping + + +def _process_object_feature(obj_list, + max_obj_points, + label_mapping_path=None, + habitat_alignment=False): + # label_mapping_path: + # path to "matterport_category_mappings.tsv" + # + # - **all object pc will be centralized** + # - you may provide a local seed to make this deterministic + obj_list = np.array(obj_list, dtype=object) + obj_fts = [] + obj_locs = [] + if label_mapping_path is not None: + obj_sems = [] + _, mapping = read_label_mapping(label_mapping_path) + for obj_pcd in obj_list: + # TODO(jxma): Align obj pc with habitat coordinate + if habitat_alignment: + obj_pcd[:, 1], obj_pcd[:, 2] = obj_pcd[:, 2], -obj_pcd[:, 1] + obj_center = obj_pcd[:, :3].mean(0) + obj_size = obj_pcd[:, :3].max(0) - obj_pcd[:, :3].min(0) + obj_locs.append(np.concatenate([obj_center, obj_size], 0)) + pcd_idxs = np.random.choice(len(obj_pcd), + size=max_obj_points, + replace=(len(obj_pcd) < max_obj_points)) + obj_pcd = obj_pcd[pcd_idxs] + + # 
TODO(jxma): for now we just centralize all object point clouds + obj_pcd[:, :3] = obj_pcd[:, :3] - obj_pcd[:, :3].mean(0) + max_dist = np.max(np.sqrt(np.sum(obj_pcd[:, :3]**2, 1))) + if max_dist < 1e-6: # take care of tiny point-clouds, i.e., padding + max_dist = 1 + obj_pcd[:, :3] = obj_pcd[:, :3] / max_dist # xyz normalize + + obj_pcd[:, 3: + 6] = obj_pcd[:, 3: + 6] * 2 - 1 # rgb normalize to [-1, 1] (saved point clouds store rgb as uint8 / 255.0) + obj_fts.append(obj_pcd[:, :6]) # XYZRGB (xyz in columns 0-2, rgb in columns 3-5) + if label_mapping_path is not None: + # TODO(jxma): we assume the last column is the semantic label + sem = obj_pcd[0, -1] - 1 # from 1-1659 to 0-1658 + obj_sems.append(mapping[sem]) + + obj_fts = np.stack(obj_fts, 0).astype(np.float32) + obj_locs = np.array(obj_locs).astype(np.float32) + obj_masks = np.ones(len(obj_locs)).astype(np.uint8) + if label_mapping_path is not None: + obj_sems = np.array(obj_sems).astype(object) + return obj_fts, obj_locs, obj_masks, obj_sems + else: + return obj_fts, obj_locs, obj_masks + + +def prepare_object_feature_habitat(object_path, label_mapping_path, + max_obj_points): + # object_path: + # path to "objects.npy" + # label_mapping_path: + # path to "matterport_category_mappings.tsv" + # + # - **all object pc will be centralized** + # - you may provide a local seed to make this deterministic + obj_list = np.load(object_path, allow_pickle=True) + obj_list = filter_object_type(obj_list) + return _process_object_feature(obj_list, + max_obj_points, + label_mapping_path, + habitat_alignment=True) diff --git a/models/LEO/data/text_pool.py b/models/LEO/data/text_pool.py new file mode 100644 index 0000000..75b5297 --- /dev/null +++ b/models/LEO/data/text_pool.py @@ -0,0 +1,532 @@ +Leo_objcap_instruction_pool = [ + 'Produce a description for the object at the chosen spot in the 3D scene.', + 'How would you depict the object located at the selected point in the 3D environment?', + 'Formulate a description of the item at the picked position within the 3D scene.', + 'How would you describe the entity at the designated location in the 3D backdrop?', + 'Can you detail the object situated at the selected point in the 3D setting?', + 'Compose a narrative for the object at the chosen locale within the 3D environment.', + 'What does the object at the specified position in the 3D visualization look like?', + 'Provide a description for the item located at the marked site in the 3D world.', + 'How would you illustrate the object placed at the selected spot in the 3D landscape?', + 'Craft a depiction of the object at the pinpointed location within the 3D territory.', + 'What kind of object is illustrated at the identified site in the 3D tableau?', + 'Develop a description of the object at the specified position in the 3D backdrop.', + "What is the entity's detail at the highlighted site in the 3D view?", + 'Write up a description of the entity at the selected spot in the 3D realm.', + 'What does the object look like at the pinpointed location in the 3D space?', + 'Detail the entity located at the chosen position within the 3D scene.', + 'Can you explain the essence of the object at the selected spot in the 3D zone?', + 'Frame a depiction of the entity resting at the demarcated area in the 3D presentation.', + 'Broadcast the details of the object at the noted sector in the 3D zone.', + 'Deliver a comprehensive look at the item at the desired locale in the 3D realm.', + 'Provide an exposition on the object at the selected vertex in the 3D projection.', + 'Paint a picture of the entity sitting at the defined spot in the 
3D area.', + 'Give a run-down of the object standing at the decided point in the 3D scene.', + 'Jot down the specifics of the item at the agreed position in the 3D depiction.', + 'Spell out the details of the object at the exact location in the 3D display.', + 'How do you interpret the entity at the set coordinate in the 3D setting?', + 'Explain the intricacies of the object at the established spot in the 3D landscape.', + 'Shed light on the item found at the appointed place in the 3D universe.', + 'Give your take on the object situated at the designated site in the 3D realm.', + 'Expand on the details of the entity at the chosen location in the 3D scene.', + 'Draft a portrait of the object lying at the pinpointed section in the 3D domain.', + 'Tell about the object at the selected spot in the 3D environment.', + "What's the object like at the specified position in the 3D scene?", + 'State the details of the object at the chosen location in the 3D display.', + 'Describe what you see at the picked spot in the 3D view.', + 'What does the selected location in the 3D scene show?', + 'Mention the object at the highlighted spot in the 3D environment.', + 'Share details of the object at the selected spot in the 3D layout.', + 'Report on the item at the specific point in the 3D realm.', + 'Express the details of the object at the marked location in the 3D scene.', + "Tell us what's at the pinpointed spot in the 3D representation.", + 'Point out the object at the identified position in the 3D visualization.', + 'Say what the object looks like at the picked location in the 3D space.', + 'Detail the item at the marked spot in the 3D setup.', + 'What can you observe at the chosen point in the 3D scene?', + 'Recount what the object is like at the given spot in the 3D area.', + 'Note the entity at the designated position in the 3D display.', + "Clarify the object's appearance at the chosen site in the 3D backdrop.", + 'Explain the object at the pinpointed location in the 3D setting.', + 'Identify the object located at the specific point in the 3D environment.', + 'Share the characteristics of the object at the selected spot in the 3D world.', + 'How does the object appear at the marked location in the 3D view?', + "Describe the object's features at the identified spot in the 3D realm.", + 'Provide info on the entity at the selected coordinate in the 3D scene.', + 'Relate the appearance of the object at the given site in the 3D space.', + 'Tell the nature of the object at the chosen position in the 3D scene.', + 'State what you notice at the pointed location in the 3D representation.', + 'Give an account of the item at the decided spot in the 3D domain.', + 'Inform about the object at the specified place in the 3D landscape.', + 'Brief about the entity present at the marked spot in the 3D visualization.', + 'Talk about the object at the picked point in the 3D display.', + 'Write down what you see at the chosen location in the 3D setting.', + 'Indicate the nature of the object at the selected site in the 3D projection.', + 'Clarify what stands at the particular position in the 3D environment.', + 'Discuss the object at the determined spot in the 3D backdrop.', + 'Offer insights on the item at the identified place in the 3D view.', + 'State characteristics of the entity at the pinpointed location in the 3D scene.', + 'Provide specifics of the object at the marked area in the 3D realm.', + 'Describe the item you observe at the chosen spot in the 3D display.', + 'Note details of the object at the selected 
position in the 3D framework.', + 'What was the object like at the chosen location in the 3D scene?', + 'Had you noticed any object at the pinpointed place in the 3D display?', + "I'd like to know about the entity at the mentioned location in the 3D view.", + 'Were there any notable features of the object at that 3D scene location?', + 'How did the object in the mentioned 3D position appear to you?', + 'There was a mention of an object in the 3D scene; can you detail it?', + 'Can you look back and describe the entity from the 3D location?', + 'What caught your attention about the object at the said 3D location?', + 'Recount the specifics of the entity from the past 3D scene description.', + "I'd like a recap on the object at the highlighted spot in the 3D view.", + 'Were you able to capture details of the object at the 3D site?', + 'From your perspective, how did the object at the 3D point appear?', + 'Walk me through the details of the entity at the pointed 3D location.', + 'What were your observations about the object at that 3D spot?', + 'Looking back, how would you describe the object in that 3D location?', + 'If you were to re-describe the object in the 3D realm, what would you say?', + "What's your take on the object at the indicated spot in the 3D area?", + 'Could you revisit and elaborate on the object in that particular 3D scene?', + 'In the 3D scene, which object caught your attention at the chosen spot?', + 'What are the main features of the object at the identified 3D site?', + 'Focusing on the 3D scene, describe the object at the indicated spot.', + 'Provide a clearer description of the object at that specific 3D spot.', + 'Highlight the main aspects of the object at the selected location in the 3D realm.', + 'Can you give more insight into the object at the marked position in the 3D view?', + "What's significant about the object at the chosen 3D location?", + "Let's go back to the object at the noted spot in the 3D scene. What can you tell?", + 'Provide an overview of the object at the identified position in the 3D display.', + 'Discuss the object found at the selected point in the 3D backdrop.', + "I'd like a straightforward description of the object at the 3D location you mentioned.", + 'Re-focus on the object at the pinpointed 3D site and describe it.', + 'Bring up details about the object at the specific 3D location again.', + 'Emphasize the main characteristics of the object at the highlighted 3D spot.', + "I'm interested in the object at the given 3D position. 
Can you describe it?", + 'Break down the details of the object located at that point in the 3D realm.', + 'Help me understand the object at the indicated spot in the 3D visualization.', + 'Return to the object in question in the 3D scene and detail it.', + 'I want to hear more about the object at the selected 3D location.', + 'Offer a simple depiction of the object at the mentioned spot in the 3D setting.', + 'How would you define the object at the identified location in the 3D environment?', + 'Shed light on the object placed at the pinpointed 3D site.', + 'Could you revisit the object at the marked spot in the 3D realm?', + 'Walk me through the features of the object at the specific 3D location.', + 'Referring back to the 3D scene, detail the object at the selected spot.', + 'Describe the object at that particular 3D spot.', + 'What can you tell about the object at the chosen 3D position?', + 'Highlight the object at the indicated spot in the 3D view.', + 'Can you discuss the object at the marked location in the 3D scene?', + 'Please focus on the object at the selected 3D point.', + 'Give details about the object at the picked 3D location.', + 'Tell me about the object at the noted spot in the 3D environment.', + 'Describe the object at the given 3D location again.', + 'Please revisit the object at the selected point in the 3D realm.', + "I'd like to hear about the object at the mentioned 3D spot.", + 'How does the object at the highlighted 3D position look?', + 'Briefly, tell me about the object at the chosen 3D location.', + 'Provide some details on the object at the indicated 3D site.', + 'Zoom in on the object at that particular 3D position.', + 'What do you observe about the object at the selected 3D point?', + 'Share a brief description of the object at the 3D location.', + 'Can you clarify the object at the picked spot in the 3D scene?', + 'How would you briefly describe the object at the 3D site?', + 'I need a quick overview of the object at the mentioned 3D location.', + 'Tell me briefly about the object at the highlighted 3D spot.', + 'What details stand out about the object at the chosen 3D point?', + 'Quickly describe the object at the pinpointed 3D location.', + 'Discuss the object at the specified spot in the 3D backdrop.', + 'Please give a summary of the object at the 3D location.', + "I'm curious about the object at the selected 3D site. Tell me more.", + 'How does the object at the 3D location appear to you?', + 'Can you provide a simple overview of the object at the 3D spot?', + "What's notable about the object at the pinpointed 3D position?", + 'Give me a snapshot of the object at the chosen 3D location.', + 'Describe the primary features of the object at the indicated 3D spot.', + 'What stands out about the object at the marked 3D site?', + 'Offer a quick depiction of the object at the selected 3D point.', + 'Focus on the object at the given 3D location and describe it.', + 'Elaborate briefly on the object at the selected 3D position.', + 'Highlight the basics of the object at the identified 3D spot.', + 'Share your observations on the object at the pointed 3D location.', + 'How would you describe the object at the 3D point in a few words?', + 'Give me the key details of the object at the specified 3D spot.' 
+] + +Leo_situation_pool = ['You are at a selected location in the 3D scene.'] +# Leo_situation_pool = [ +# "A location in the 3D scene is selected", +# "A point in the 3D environment has been chosen", +# "A position within the 3D setting is picked", +# "In the 3D scene, a location has been selected", +# "A spot in the 3D context is opted for", +# "Within the 3D backdrop, a location was identified", +# "A place in the 3D representation was highlighted", +# "A site within the three-dimensional scene is chosen", +# "A particular point in the 3D scene has been earmarked", +# "A location is pinpointed in the 3D environment", +# "In the three-dimensional setting, a place has been opted for", +# "A location in the 3D space has been marked", +# "A selection is made at a location in the 3D scene", +# "A specific location in the 3D view was picked out", +# "A point within the 3D landscape is chosen", +# "A spot in the 3D visualization is selected", +# "Within the 3D domain, a particular spot is identified", +# "A location in the tri-dimensional scene is decided upon", +# "In the 3D area, a specific site is highlighted", +# "One location within the 3D scene has been chosen", +# "A position in the 3D realm is selected", +# "A specific point in the 3D expanse is determined", +# "A spot is singled out in the 3D scene", +# "Within the 3D scene, a location gets chosen", +# "A specific location in the 3D area gets pinpointed", +# "In the 3D tableau, a point is selected", +# "A position in the three-dimensional context is marked", +# "A spot in the 3D territory has been picked", +# "In the 3D world, a location is singled out", +# "A point in the 3D scene gets determined", +# "A place within the 3D framework is chosen", +# "A site in the 3D panorama is selected", +# "A location in the 3D dimension is picked out", +# "Within the 3D universe, a point is earmarked", +# "A space in the 3D world has been chosen", +# "In the 3D display, a location is picked", +# "A site in the 3D realm gets highlighted", +# "A spot is chosen within the three-dimensional world", +# "A position in the 3D scene is zeroed in on", +# "A locale in the 3D environment is marked", +# "In the 3D zone, a place is singled out", +# "A location within the 3D canvas is noted", +# "A point is selected within the 3D model", +# "A specific site in the 3D view is opted for", +# "A location in the 3D plane gets highlighted", +# "Within the 3D sphere, a site is chosen", +# "A spot in the 3D depiction is decided upon", +# "A point in the three-dimensional space is identified", +# "A location in the 3D perspective is selected", +# "In the 3D projection, a site is pinpointed", +# "A selection is made at a location in the 3D scene", +# "A specific location in the 3D view was picked out", +# "Within the 3D framework, a spot was selected", +# "A point has been determined in the 3D landscape", +# "A location in the 3D area is now chosen", +# "A selection of a spot was made in the 3D territory", +# "In the 3D arena, a point was highlighted", +# "A location in the 3D sphere was chosen", +# "One can see a selected spot in the 3D scene", +# "A point within the 3D domain has been chosen", +# "A site in the 3D realm was picked", +# "A location in the 3D environment has been indicated", +# "A position in the 3D visualization is marked", +# "In the 3D display, a location has been pinpointed", +# "A specific point in the 3D field was highlighted", +# "One point in the 3D dimension is now selected", +# "A site was earmarked within the 3D view", +# "A location has 
been picked out in the 3D visualization", +# "In the 3D model, a point has been highlighted", +# "A site is marked within the 3D landscape", +# "In the 3D configuration, a location was chosen", +# "Within the 3D perspective, a point was earmarked", +# "A particular site in the 3D scene is marked", +# "A spot is indicated within the 3D realm", +# "The chosen point lies within the 3D scene", +# "A location within the 3D territory has been picked", +# "Within the 3D context, a spot was selected", +# "A point is designated in the 3D landscape", +# "In the 3D depiction, a specific location was chosen" +# ] + +Leo_scenecap_instruction_pool = [ + 'Describe this scene.', 'Generate a description of this scene.', + 'Generate a caption of this scene.', 'Can you describe the scene?', + 'Can you generate a description of the scene?', + 'Can you generate a caption of the scene?', 'Summarize this scene.', + "Provide an outline of this 3D scene's characteristics.", + 'How would you describe the 3D scene?', + 'How would you summarize this scene?', + 'Convey a summary of the 3D structure of this scene.', + 'How would you interpret this 3D scene?', + 'Offer a summary of the 3D scene.', + 'Can you describe this scene in detail?', + "I'm interested in this scene, can you explain?", + 'What is this scene made of?', + 'Could you provide more info about this scene?', + 'Could you describe the detailed structure of this scene?', + 'What can you tell me about this scene?', + 'I want to know more about this scene, can you help?', + 'Can you walk me through the details of this scene?', + 'Can you provide a comprehensive account of this scene?', + 'Offer a detailed interpretation of this scene.', + 'Could you provide an in-depth description of this scene?', + 'Elaborate on the details of this scene, please.', + 'Kindly furnish me with more information about this scene.', + 'Please expand on the intricate structure of this scene.', + 'Provide a meticulous explanation of what is in this scene.', + 'I request a detailed breakdown of this scene.', + 'Can you offer a complete analysis of this scene?', + 'I would like a comprehensive explanation of this scene.', + 'Please detail the specific features of this scene.', + 'Could you elaborate extensively on what this scene contains?', + 'Offer a brief overview of this scene.', + 'Can you provide a snapshot description of this scene?', + 'Break down the elements of this 3D scene.', + 'Detail the components of this scene.', + "What's happening in this 3D scene?", 'Illustrate this scene in words.', + 'How does this 3D scene appear to you?', 'Offer a synopsis of this scene.', + 'Provide a concise description of the current 3D scene.', + 'Could you give a short overview of this scene?', + 'Highlight the main features of this 3D scene.', + "Elaborate briefly on this scene's details.", + 'Give an account of the current 3D scene.', + 'What can you tell me about this scene?', 'Break down this scene for me.', + 'Share your observations about this 3D scene.', + 'What do you notice in this scene?', + 'Describe the main elements of this 3D scene.', + 'Capture the essence of this scene in a few words.', + "Provide insights about this scene's composition.", + 'How would you depict this 3D setting?', + 'Offer a brief narrative about this scene.', + 'Can you share a short summary of the 3D scene?', + 'Describe the atmosphere of this scene.', + 'What are the standout features of this 3D scene?', + 'Illustrate the main aspects of this scene.', + "Could you detail the scene's ambiance?", + 
"Highlight this 3D scene's focal points.", + 'Share a brief description of the current scene.', + "Could you give an outline of what's happening in this scene?", + 'Provide your perspective on this 3D scene.', + 'Sketch out the main elements of this scene.', + 'Help me understand the essence of this 3D scene.', + "What's your take on this particular scene?", + 'Draw a verbal picture of this 3D scene.', + 'What stands out to you in this 3D environment?', + 'Share the highlights of this scene.', + "Could you depict the scene's main attributes?", + 'Offer a depiction of this 3D scene.', + 'Walk me through the elements of this scene.', + 'Convey the main features of this 3D environment.', + 'How do you interpret this particular scene?', + 'Offer a verbal snapshot of this 3D setting.', + 'How would you characterize this scene?', + 'Share the nuances you observe in this 3D scene.', + 'Concisely, how would you present this scene?', + "Illustrate the scene's characteristics in words.", + 'Relay the core aspects of this 3D environment.', + "Describe the scene's dynamics to me.", + 'Share your initial thoughts on this 3D scene.', + "Provide a rundown of this scene's details.", + "Outline the scene's primary components.", + "Can you narrate what's happening in this 3D view?", + "Convey the scene's atmosphere in your words.", + 'Brief me on the key elements of this scene.', + 'Lay out the features of this 3D representation.', + 'How would you portray the current 3D scene?', + "Provide your summary of the scene's layout.", + 'Can you capture the mood of this scene?', + 'Detail the main components of this 3D environment.', + 'Give me your viewpoint on this particular scene.', + 'Discuss the prominent aspects of the 3D scene.', + 'How would you recap this scene briefly?', + 'Enumerate the features of this 3D scene.', + 'Convey the spirit of this scene in a sentence or two.', + "Offer a review of the scene's core elements.", + 'Paint a word picture of this 3D environment.', + 'How does this scene resonate with you?', + "Present the scene's key highlights.", + 'Give a concise interpretation of this 3D scene.', + 'Can you distill the essence of this scene?', + 'Unpack the details of this 3D scene for me.', + "Provide a brief of the scene's composition.", + "Guide me through this scene's aspects.", + 'Deliver a quick summary of this 3D setting.', + 'Analyze and describe this scene to me.', + 'Express the main points of this 3D environment.', + 'Relate the main highlights of this scene.', + 'How would you represent this scene in words?', + 'Provide a concise snapshot of this 3D view.', + "Lay out the scene's features for me.", + 'Briefly, what does this 3D scene entail?', + "Offer a quick rundown of this scene's aspects.", + 'Translate this scene into a brief description.', + 'Elucidate the main components of this 3D space.', + 'What are the principal elements of this scene?', + 'Paint a quick picture of this 3D landscape.', + 'Characterize the nuances of this scene.', + 'Render a description of this 3D setting.', + 'Communicate the gist of this scene.', 'Describe the scene in a nutshell.', + 'Lay bare the primary attributes of this 3D perspective.', + "Narrate the scene's main points to me.", + 'Pinpoint the core features of this scene.', + 'Give me a brief walkthrough of the 3D space.', + "How would you vocalize this scene's characteristics?", + "Explain the scene's main attributes.", + 'Sketch a quick portrait of this 3D depiction.', + 'Express the 3D scene in your own terms.', + "Relay a brief summary of 
this scene's elements.", + 'Disclose the main points of this 3D view.', + 'Condense the scene into a few descriptive words.', + "Provide insight into the scene's makeup.", + 'Offer your brief assessment of the 3D layout.', + 'Can you encapsulate the scene in a sentence?', + "Detail the scene's key traits.", + 'Give a concise review of the 3D surroundings.', + 'Provide a snapshot view of this scene.', + 'Briefly articulate the features of the 3D scene.', + 'Spell out the essentials of this scene for me.', + 'What elements form this scene?', 'Break this 3D scene down for me.', + 'Capture the core of this scene in a description.', + 'What can you discern from this 3D setup?', + "Offer me a glimpse into the scene's makeup.", + 'Share a descriptive view of this 3D perspective.', + "Can you boil down the scene's essence in words?", + 'Shed light on the main elements of this 3D scene.', + "Give me a snapshot of the scene's ambiance.", + 'How would you encapsulate the feel of this scene?', + 'Describe the core elements you perceive in this 3D space.', + 'Help me grasp the mood of this scene.', + 'Provide a brief commentary on this 3D view.', + 'Draw out the major elements of this scene.', + 'Narrate the scene in a few succinct lines.', + "How would you summarize the 3D scene's content?", + "Break down the 3D scene's components briefly.", + 'Walk me through the highlights of this scene.', + 'Draft a concise portrayal of this 3D environment.', + "Enlighten me on the scene's standout features.", + 'How would you verbally sketch this 3D scene?', + 'Can you compact the scene into a brief description?', + 'Lay out the key elements of this scene for me.', + 'Present a short summary of this 3D depiction.', + 'Highlight the essence of the scene briefly.', + "How would you paraphrase this scene's contents?", + 'Guide me briefly through this 3D setting.', + "What's the synopsis of this scene?", + "Express the 3D scene's mood in a sentence.", + "Concisely convey the scene's main elements.", + 'Offer a snapshot interpretation of this 3D scene.', + "How would you boil down the scene's main points?", + 'Quickly outline the features of this scene.', + "Capture the 3D scene's character briefly.", + 'In a few words, how would you depict this scene?', + 'Can you provide a concise 3D scene rundown?', + 'Report the main aspects of this scene.', + "Sum up the 3D scene's ambiance in brief.", + "Detail the scene's essence in a few lines.", + "Give me the scene's flavor in a short description." 
+] + +Leo_plan_instruction_pool = [ + 'Plan for the task', 'Can you come up with a plan of this task', + 'How can we do this task, provide a step by step plan', + 'Draft a plan for completing this task', 'Detail a strategy for the task', + "What's the best plan for this task", 'Draw out a procedure for the task', + 'Lay out the steps for this task', 'Could you devise a plan for the task', + 'Show me a plan for this task', 'I need a plan for the task', + 'Sketch a plan for the task at hand', 'Set up a plan for this', + 'Recommend a plan for this task', 'Offer a strategy for this task', + 'Design a blueprint for the task', 'Outline the approach for this task', + 'Can you draft a method for this task', 'Propose a plan for the task', + 'Walk me through a plan for this task', 'Develop a plan for this', + 'Formulate a strategy for the task', 'Elaborate a plan for this task', + 'Define a route for the task', 'Envision a plan for the task', + 'What would be a plan for this task', 'Illustrate a plan for the task', + 'Plan out how to tackle this task', 'Build a plan for this job', + 'Map out the task for me', "What's our game plan for this task", + 'Can you brief a plan for this task', 'Design a roadmap for the task', + 'Jot down a plan for the task', 'Provide a strategy for the task', + "Chart out the task's plan", 'Display a plan for this', + 'Conceive a plan for the task', 'Can you list the steps for this task', + "Could you outline the task's strategy", + 'Break down the task into steps for me', 'Show the way to do this task', + 'Reveal the plan for the task', "What's the procedure for this task", + 'How should we approach this task', 'Prepare a plan for this task', + 'Instruct on planning the task', 'Offer insights on how to plan this task', + 'Could you break down the task plan', + "Explain the task's sequence of actions", + 'What would be the strategy for this', 'Create a layout for the task', + 'Draw up a plan for this operation', 'Provide a directive for the task', + 'Tell me how we can plan the task', 'Describe a plan for the task', + 'Give a suggestion on how to plan the task', + 'Draft a sequence for the task', 'List the procedure for this task', + 'Can you depict a plan for the task', 'Provide a blueprint for the task', + 'Frame a strategy for the task', 'Make a plan for executing the task', + 'How should this task be planned', 'Produce a plan for the task', + "What's the scheme for the task", + 'Help me understand the plan for the task', + 'Chalk out a plan for the task', 'How would you plan this task', + 'What steps are needed for this task', + 'How can we best plan for this task', 'Draft the roadmap for the task', + 'Mention the plan for the task', 'Guide me with a plan for the task', + 'Write down a plan for the task', 'Can you clarify the plan for this task', + 'Construct a plan for the task', 'Give me the steps for this task', + 'Strategize the task for me', 'Share a plan for the task', + 'How can we strategize this task', 'Offer a guide for this task', + 'Arrange a plan for this task', 'Demonstrate the plan for the task', + 'Tell me the method for this task', 'Present a strategy for the task', + "Can you brief me on the task's plan", 'Form a plan for the task', + 'What are the guidelines for this task', 'How do we map out this task', + "What's the plan of action for this task", + 'Help me set a plan for this task', 'State a plan for the task', + 'Can you outline steps for this task', + 'How can we systematically plan this task', + 'Lay out a blueprint for the task', 'Draft an 
approach for the task', + 'Detail a method for the task', 'How do you see the plan for this task', + 'Plan how to execute the task', "I'd like a plan for this task", + 'Structure a plan for the task', "What's the task's layout", + 'Share your thoughts on planning the task', + 'What would be a systematic plan for the task', + 'How do you suggest we tackle this task', + 'Could you illustrate the steps for the task', + 'Draft the sequence for the task', 'List out a plan for the task', + 'Propose a strategy for this task', "Give me a rundown of the task's plan", + 'How should this task be structured', 'Craft a plan for the task', + 'Help me draft a plan for the task', 'Create a scheme for the task', + 'Define a strategy for the task', 'Explain the steps for this task', + 'Can you formulate a strategy for the task', + "Guide me through the task's plan", 'Organize a plan for this task', + 'Render a plan for the task', 'Provide direction for this task', + 'Set the plan for the task', 'How would you structure the task', + "Give me the task's breakdown", 'How do we set a course for this task', + 'Can you guide me in planning this task', + "What's the framework for the task", 'Write out a plan for the task', + 'How would we go about this task', 'Delineate the steps for the task', + 'Tell me the strategy for the task', 'Give a structured plan for the task', + 'How do we layout this task', 'Give me a clear plan for the task', + 'Could you plot a course for the task', + "Help in setting up the task's plan", 'Give a directive for the task', + 'Map the steps for this task', "How can we layout the task's plan", + 'Establish a plan for the task', "Help with the task's strategy", + 'Create a course of action for the task', + 'Describe the steps for the task', 'Present the blueprint for the task', + 'Share the procedure for the task', 'How should we map this task', + 'Detail the roadmap for the task', + 'Give me a plan on how to approach the task', + "Provide guidance on the task's plan", 'Break the task into steps', + 'Show me the roadmap for the task', + "What's the strategy for executing the task", + "What's the plan of approach for the task", 'Help me strategize the task', + "Write up the task's plan", "Present the task's method", + 'Tell me how to plan out the task', 'Define the blueprint for the task', + 'Explain the blueprint for the task', + 'Could you give a plan for this task', 'Set out a strategy for the task', + 'Describe how to go about this task', 'How should we lay out the task', + 'Can you provide a plan for the task', 'Set forth the steps for the task', + 'How can we set up the task', "Plan the task's procedure", + 'How do we execute the task', 'Propose a method for the task', + 'Write a structured plan for the task', "Can you detail the task's steps", + 'Draw the layout for the task', 'Conduct a plan for the task', + 'Please give a strategy for the task', 'How should we organize the task', + 'Show the method for the task', 'Recommend steps for this task', + 'Form a strategy for the task', "Help me figure out the task's plan", + 'Set a strategy for this task', 'Can you delineate the task', + 'How should we proceed with the task', "Guide me on the task's steps", + 'Offer a method for the task', 'Give the procedure for the task', + 'How do we detail the task', "Can you arrange the task's strategy", + 'Give a breakdown of the task', 'Plan the steps for this task', + "What's the systematic approach for the task", + 'Outline a procedure for this task', 'Create a step by step for the task', + "Can 
you write out the task's strategy", 'Present the steps for the task', + 'Detail the plan for this task', + 'Could you provide direction for the task', + "What's the strategy to follow for this task", + 'Plan the course of action for the task', 'Can you help plan this task', + "Help me with the task's blueprint", "Construct the task's strategy" +] diff --git a/models/LEO/evaluator/GPT_eval.py b/models/LEO/evaluator/GPT_eval.py new file mode 100644 index 0000000..621915a --- /dev/null +++ b/models/LEO/evaluator/GPT_eval.py @@ -0,0 +1,24 @@ +import json +from argparse import ArgumentParser +from pathlib import Path + +from mmscan import GPTEvaluator + +if __name__ == '__main__': + parser = ArgumentParser() + parser.add_argument('--file', type=str, required=True) + parser.add_argument('--tmp_path', type=str, required=True) + parser.add_argument('--api_key', type=str, required=True) + parser.add_argument('--eval_size', type=int, default=-1) + parser.add_argument('--nproc', type=int, default=8) + args = parser.parse_args() + + leo_file_path = args.file + + evaluator = GPTEvaluator(eval_size=args.eval_size, + api_key=args.api_key) + + with open(leo_file_path, 'r') as f: + results = json.load(f) + print(evaluator.load_and_eval(results, num_threads=args.nproc, + tmp_path=args.tmp_path)) diff --git a/models/LEO/evaluator/__init__.py b/models/LEO/evaluator/__init__.py new file mode 100755 index 0000000..12e1efd --- /dev/null +++ b/models/LEO/evaluator/__init__.py @@ -0,0 +1,5 @@ +from .cap_eval import CaptionEvaluator +from .eai_eval import CLIPortEvaluator, ObjNavEvaluator +from .mmscan_eval import MMScanEvaluator +from .scanqa_eval import ScanQAEvaluator +from .sqa3d_eval import SQA3DEvaluator diff --git a/models/LEO/evaluator/build.py b/models/LEO/evaluator/build.py new file mode 100755 index 0000000..e8fbeb4 --- /dev/null +++ b/models/LEO/evaluator/build.py @@ -0,0 +1,7 @@ +from fvcore.common.registry import Registry + +EVALUATOR_REGISTRY = Registry('Evaluator') + + +def build_eval_leo(cfg, task_name, evaluator_name): + return EVALUATOR_REGISTRY.get(evaluator_name)(cfg, task_name) diff --git a/models/LEO/evaluator/cap_eval.py b/models/LEO/evaluator/cap_eval.py new file mode 100644 index 0000000..4bfb000 --- /dev/null +++ b/models/LEO/evaluator/cap_eval.py @@ -0,0 +1,166 @@ +import json +from pathlib import Path + +import numpy as np +from evaluator.build import EVALUATOR_REGISTRY +from evaluator.ngram_metrics.bleu.bleu import Bleu +from evaluator.ngram_metrics.cider.cider import Cider +from evaluator.ngram_metrics.rouge.rouge import Rouge +from sentence_transformers import SentenceTransformer +from sentence_transformers.util import pytorch_cos_sim + + +@EVALUATOR_REGISTRY.register() +class CaptionEvaluator(): + + def __init__(self, cfg, task_name): + self.task_name = task_name + + self.cider_scorer = Cider() + self.bleu_scorer = Bleu() + self.rouge_scorer = Rouge() + + self.best_result = -np.inf + + self.save_dir = Path(cfg.exp_dir) / 'eval_results' / task_name + self.save_dir.mkdir(parents=True, exist_ok=True) + + self.corpus_path = cfg.data.scan2cap.corpus if self.task_name.lower( + ) == 'scan2cap' else None + self.sentence_model = SentenceTransformer( + 'sentence-transformers/all-MiniLM-L6-v2') + + self.reset() + + def reset(self): + self.eval_dict = { + 'target_metric': [], + 'sentence_sim': [], + 'cider': 0, + 'bleu': 0, + 'rouge': 0, + } + self.total_count = 0 + self.save_results = [] + self.init_corpus() + + def init_corpus(self): + if self.task_name.lower() == 'scan2cap': + with 
open(self.corpus_path, 'r') as f: + self.gt_sentence_mp = json.load(f) + self.pred_sentence_mp = {} + else: + # init with list, finally convert to dict + self.gt_sentence_mp = [] + self.pred_sentence_mp = [] + + def batch_metrics(self, data_dict): + metrics = {} + output_gt = data_dict['output_gt'] + output_pred = data_dict['output_txt'] + batch_size = len(output_gt) + + # consider IoU-based caption metrics + if 'iou_flag' in data_dict: + iou_flags = data_dict['iou_flag'] + else: + iou_flags = [True] * batch_size + + if self.task_name.lower() == 'scan2cap': + for i in range(batch_size): + corpus_key = data_dict['corpus_key'][i] + if iou_flags[i]: + self.pred_sentence_mp[corpus_key] = [ + ('sos ' + output_pred[i] + ' eos').replace( + '. ', ' . ') + ] + else: + output_pred[i] = '' + self.pred_sentence_mp[corpus_key] = ['sos eos'] + else: + for i in range(batch_size): + if iou_flags[i]: + self.pred_sentence_mp.append([output_pred[i]]) + else: + output_pred[i] = '' + self.pred_sentence_mp.append(['']) + self.gt_sentence_mp.append([output_gt[i]]) + + # compute sentence similarity + embed_pred = self.sentence_model.encode(output_pred, + convert_to_tensor=True) + embed_gt = self.sentence_model.encode(output_gt, + convert_to_tensor=True) + sims = pytorch_cos_sim(embed_pred, embed_gt).diag() + + metrics['total_count'] = batch_size + metrics['sentence_sim'] = sims.mean().item() + metrics['target_metric'] = metrics['sentence_sim'] + return metrics + + def update(self, data_dict): + metrics = self.batch_metrics(data_dict) + batch_size = metrics['total_count'] + self.total_count += batch_size + + for i in range(batch_size): + save_dict = { + # vision + 'source': data_dict['source'][i], + 'scene_id': data_dict['scene_id'][i], + 'anchor': data_dict['anchor_locs'][i].tolist(), + # language + 'instruction': data_dict['prompt_after_obj'][i], + 'response_gt': data_dict['output_gt'][i], + 'response_pred': data_dict['output_txt'][i], + } + if 'iou_flag' in data_dict: + save_dict['iou_flag'] = data_dict['iou_flag'][i].item() + self.save_results.append(save_dict) + + for key in self.eval_dict.keys(): + if key not in ['cider', 'bleu', 'rouge']: + self.eval_dict[key].append(metrics[key] * batch_size) + + def record(self, split, is_main_process): + # ngram metrics + if self.task_name.lower() == 'scan2cap': + # align gt_sentence_mp to pred_sentence_mp for partial evaluation + self.gt_sentence_mp = { + k: self.gt_sentence_mp[k] + for k in self.pred_sentence_mp.keys() + } + else: + self.gt_sentence_mp = { + k: v + for k, v in enumerate(self.gt_sentence_mp) + } + self.pred_sentence_mp = { + k: v + for k, v in enumerate(self.pred_sentence_mp) + } + + self.eval_dict['cider'] = self.cider_scorer.compute_score( + self.gt_sentence_mp, self.pred_sentence_mp)[0] + self.eval_dict['bleu'] = self.bleu_scorer.compute_score( + self.gt_sentence_mp, self.pred_sentence_mp)[0][-1] + + self.eval_dict['rouge'] = self.rouge_scorer.compute_score( + self.gt_sentence_mp, self.pred_sentence_mp)[0] + + # others + for k, v in self.eval_dict.items(): + if k not in ['cider', 'bleu', 'rouge']: + self.eval_dict[k] = sum(v) / self.total_count + + if self.eval_dict['target_metric'] > self.best_result: + is_best = True + self.best_result = self.eval_dict['target_metric'] + else: + is_best = False + + if (is_best or split == 'test') and is_main_process: + with open(str(self.save_dir / 'results.json'), 'w') as f: + json.dump(self.save_results, f, indent=2) + + return is_best, self.eval_dict diff --git a/models/LEO/evaluator/eai_eval.py 
b/models/LEO/evaluator/eai_eval.py new file mode 100644 index 0000000..659e7ad --- /dev/null +++ b/models/LEO/evaluator/eai_eval.py @@ -0,0 +1,172 @@ +import json +from pathlib import Path + +import numpy as np +from accelerate.logging import get_logger +from data.eai import (_CLIPORT_ACTION_SPACE_U, _CLIPORT_ACTION_SPACE_V, + _CLIPORT_ACTION_SPACE_ZROT, _DUMMY_CLIPORT_ACTION, + CLIPORT_ACTION_SPACE_DETOKENIZE) +from evaluator.build import EVALUATOR_REGISTRY + +logger = get_logger(__name__) + + +@EVALUATOR_REGISTRY.register() +class ObjNavEvaluator(): + + def __init__(self, cfg, task_name): + self.task_name = task_name + self.best_result = -np.inf + self.save_dir = Path(cfg.exp_dir) / 'eval_results' / task_name + self.save_dir.mkdir(parents=True, exist_ok=True) + self.reset() + + def reset(self): + self.eval_dict = {'target_metric': [], 'accuracy': []} + self.total_count = 0 + self.save_results = [] + + def batch_metrics(self, data_dict): + metrics = {} + preds = data_dict['output_txt'] + gts = data_dict['output_gt'] + + correct = 0 + for pred, gt in zip(preds, gts): + if pred == gt: + correct += 1 + + batch_size = len(gts) + metrics['total_count'] = batch_size + metrics['accuracy'] = correct / batch_size + metrics['target_metric'] = metrics['accuracy'] + return metrics + + def update(self, data_dict): + metrics = self.batch_metrics(data_dict) + batch_size = metrics['total_count'] + self.total_count += batch_size + + for i in range(batch_size): + self.save_results.append({ + # vision + 'source': + data_dict['source'][i], + 'scene_id': + data_dict['scene_id'][i], + # language + 'instruction': + data_dict['prompt_after_obj'][i], + 'response_gt': + data_dict['output_gt'][i], + 'response_pred': + data_dict['output_txt'][i], + }) + + for key in self.eval_dict.keys(): + self.eval_dict[key].append(metrics[key] * batch_size) + + def record(self, split, is_main_process): + for k, v in self.eval_dict.items(): + self.eval_dict[k] = sum(v) / self.total_count + + if self.eval_dict['target_metric'] > self.best_result: + is_best = True + self.best_result = self.eval_dict['target_metric'] + else: + is_best = False + + if (is_best or split == 'test') and is_main_process: + with open(str(self.save_dir / 'results.json'), 'w') as f: + json.dump(self.save_results, f, indent=2) + + return is_best, self.eval_dict + + +@EVALUATOR_REGISTRY.register() +class CLIPortEvaluator(ObjNavEvaluator): + + def reset(self): + self.eval_dict = { + 'target_metric': [], + 'accuracy': [], + 'action_error_pose0': [], + 'action_error_pose1': [], + } + self.total_count = 0 + self.save_results = [] + + def batch_metrics(self, data_dict): + metrics = super().batch_metrics(data_dict) + + # add action errors (for xy coordinates) to metrics + metrics['action_error_pose0'] = [] + metrics['action_error_pose1'] = [] + for pred, gt in zip(data_dict['output_txt'], data_dict['output_gt']): + action_pred = CLIPortEvaluator.parse_action_cliport(pred) + action_gt = CLIPortEvaluator.parse_action_cliport(gt) + for pose_name in ['pose0', 'pose1']: + xy_pred = action_pred[pose_name][0][:2] + xy_gt = action_gt[pose_name][0][:2] + error = np.linalg.norm(xy_pred - xy_gt) + metrics[f'action_error_{pose_name}'].append(error) + metrics['action_error_pose0'] = np.mean(metrics['action_error_pose0']) + metrics['action_error_pose1'] = np.mean(metrics['action_error_pose1']) + + return metrics + + @staticmethod + def parse_action_cliport(text, obs=None): + vocab = list(_CLIPORT_ACTION_SPACE_U.values()) + list( + _CLIPORT_ACTION_SPACE_V.values()) + list( + 
_CLIPORT_ACTION_SPACE_ZROT.values()) + tokens = CLIPortEvaluator.tokenize(text, '', vocab) + if len(tokens) != 6: + logger.info(f'Cannot parse action: {text}') + return _DUMMY_CLIPORT_ACTION + try: + pose0 = CLIPORT_ACTION_SPACE_DETOKENIZE(tokens[:3], obs) + pose1 = CLIPORT_ACTION_SPACE_DETOKENIZE(tokens[3:], obs) + return { + 'pose0': pose0, + 'pose1': pose1, + } + except Exception as e: + logger.info(f'{e}, cannot parse action: {text}') + return _DUMMY_CLIPORT_ACTION + + @staticmethod + def tokenize(text, prefix, vocab): + # Find the starting index of the prefix in the text + start_index = text.find(prefix) + + # If the prefix is not found, return an empty list + if start_index == -1: + return [] + + # Get the text after the prefix + after_prefix = text[start_index + len(prefix):] + + # Create a list to hold the token ids + tokens = [] + + # Iterate over each character in after_prefix + i = 0 + while i < len(after_prefix): + # Try to find the longest token in vocab that matches the current position + match = None + max_length = 0 + for token in vocab: + token_length = len(token) + if after_prefix[ + i:i + + token_length] == token and token_length > max_length: + match = token + max_length = token_length + if match is not None: + tokens.append(match) + i += max_length + else: + i += 1 + + return tokens diff --git a/models/LEO/evaluator/mmscan_eval.py b/models/LEO/evaluator/mmscan_eval.py new file mode 100644 index 0000000..4991d9e --- /dev/null +++ b/models/LEO/evaluator/mmscan_eval.py @@ -0,0 +1,85 @@ +import json +from pathlib import Path + +from mmscan import QuestionAnsweringEvaluator + +model_config = {'simcse': '', 'sbert': ''} + +from evaluator.build import EVALUATOR_REGISTRY + + +@EVALUATOR_REGISTRY.register() +class MMScanEvaluator(): + + def __init__(self, cfg, task_name): + self.evaluator = QuestionAnsweringEvaluator(model_config) + self.task_name = task_name + self.target_metric = 'refined_EM' + self.best_result = 0.0 + self.save_dir = Path('./logs') / 'eval_results' / task_name + self.save_dir.mkdir(parents=True, exist_ok=True) + + def to_mmscan_form(self, raw_input): + + _input = {} + _input['ID'] = raw_input['question_id'] + if not isinstance(raw_input['output_txt'], list): + _input['pred'] = [raw_input['output_txt']] + else: + _input['pred'] = raw_input['output_txt'] + if not isinstance(raw_input['output_gt'], list): + + _input['gt'] = [raw_input['output_gt']] + else: + _input['gt'] = raw_input['output_gt'] + _input['question'] = raw_input['prompt_after_obj'] + + return _input + + def reset(self): + self.evaluator.reset() + + def update(self, raw_input_dict): + # update buffer + raw_input_dict = flatten_items(raw_input_dict) + batch_input = [] + for _input in raw_input_dict: + batch_input.append(self.to_mmscan_form(_input)) + + self.evaluator.update(batch_input) + + @property + def save_results(self): + return self.evaluator.save_buffer + + def record(self, split, is_main_process): + # record after a whole epoch + self.evaluator.start_evaluation() + + results = self.evaluator.metric_record + + score = results[self.target_metric] + results['target_metric'] = score + + if score > self.best_result: + is_best = True + self.best_result = score + else: + is_best = False + + if (is_best or split == 'test') and is_main_process: + with open(str(self.save_dir / 'results.json'), 'w') as f: + json.dump(self.save_results, f, indent=2) + + return is_best, results + + +def flatten_items(raw_input_dict): + batch_input = [] + for _index in range(len(raw_input_dict['question_id'])): + 
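# each key of raw_input_dict maps to a batch-sized list; take the _index-th element of every key to build one per-sample dict +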
_input = {} + for key_name in raw_input_dict: + _input[key_name] = raw_input_dict[key_name][_index] + + batch_input.append(_input) + return batch_input diff --git a/models/LEO/evaluator/ngram_metrics/bleu/__init__.py b/models/LEO/evaluator/ngram_metrics/bleu/__init__.py new file mode 100755 index 0000000..e69de29 diff --git a/models/LEO/evaluator/ngram_metrics/bleu/bleu.py b/models/LEO/evaluator/ngram_metrics/bleu/bleu.py new file mode 100755 index 0000000..181954a --- /dev/null +++ b/models/LEO/evaluator/ngram_metrics/bleu/bleu.py @@ -0,0 +1,59 @@ +#!/usr/bin/env python +# +# File Name : bleu.py +# +# Description : Wrapper for BLEU scorer. +# +# Creation Date : 06-01-2015 +# Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT +# Authors : Hao Fang and Tsung-Yi Lin + +import json + +from .bleu_scorer import BleuScorer + + +class Bleu: + + def __init__(self, n=4): + # by default, compute BLEU score up to 4-grams + self._n = n + self._hypo_for_image = {} + self.ref_for_image = {} + + def compute_score(self, gts, res): + + assert (gts.keys() == res.keys()) + imgIds = gts.keys() + + bleu_scorer = BleuScorer(n=self._n) + for id in imgIds: + hypo = res[id] + ref = gts[id] + + # Sanity check. + assert (type(hypo) is list) + assert (len(hypo) >= 1) + assert (type(ref) is list) + assert (len(ref) >= 1) + + bleu_scorer += (hypo[0], ref) + + #score, scores = bleu_scorer.compute_score(option='shortest') + score, scores = bleu_scorer.compute_score(option='closest', verbose=0) + #score, scores = bleu_scorer.compute_score(option='average', verbose=1) + + # return (bleu, bleu_info) + return score, scores + + def method(self): + return 'Bleu' + + +if __name__ == '__main__': + x = Bleu(4) + with open('/home/zhuziyu/work/vlpr/3dVL/scan2cap_result.json', 'r') as f: + json_file = json.load(f) + print( + x.compute_score(json_file['gt_sentence_mp'], + json_file['pred_sentence_mp'])[0]) diff --git a/models/LEO/evaluator/ngram_metrics/bleu/bleu_scorer.py b/models/LEO/evaluator/ngram_metrics/bleu/bleu_scorer.py new file mode 100755 index 0000000..f422dfc --- /dev/null +++ b/models/LEO/evaluator/ngram_metrics/bleu/bleu_scorer.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python + +# bleu_scorer.py +# David Chiang + +# Copyright (c) 2004-2006 University of Maryland. All rights +# reserved. Do not redistribute without permission from the +# author. Not for commercial use. + +# Modified by: +# Hao Fang +# Tsung-Yi Lin +'''Provides: +cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). +cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). +''' + +import copy +import math +import re +import sys +from collections import defaultdict + + +def precook(s, n=4, out=False): + """Takes a string as input and returns an object that can be given to + either cook_refs or cook_test. + + This is optional: cook_refs and cook_test can take string arguments as + well. 
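+ Returns a (num_words, ngram -> count) tuple.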
+ """ + words = s.split() + counts = defaultdict(int) + for k in range(1, n + 1): + for i in range(len(words) - k + 1): + ngram = tuple(words[i:i + k]) + counts[ngram] += 1 + return (len(words), counts) + + +def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average" + """Takes a list of reference sentences for a single segment and returns an + object that encapsulates everything that BLEU needs to know about them.""" + + reflen = [] + maxcounts = {} + for ref in refs: + rl, counts = precook(ref, n) + reflen.append(rl) + for (ngram, count) in counts.items(): + maxcounts[ngram] = max(maxcounts.get(ngram, 0), count) + + # Calculate effective reference sentence length. + if eff == 'shortest': + reflen = min(reflen) + elif eff == 'average': + reflen = float(sum(reflen)) / len(reflen) + + ## lhuang: N.B.: leave reflen computaiton to the very end!! + + ## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design) + + return (reflen, maxcounts) + + +def cook_test(test, refs, eff=None, n=4): + """Takes a test sentence and returns an object that encapsulates everything + that BLEU needs to know about it.""" + + reflen, refmaxcounts = refs + testlen, counts = precook(test, n, True) + + result = {} + + # Calculate effective reference sentence length. + + if eff == 'closest': + result['reflen'] = min((abs(l - testlen), l) for l in reflen)[1] + else: ## i.e., "average" or "shortest" or None + result['reflen'] = reflen + + result['testlen'] = testlen + + result['guess'] = [max(0, testlen - k + 1) for k in range(1, n + 1)] + + result['correct'] = [0] * n + for (ngram, count) in counts.items(): + result['correct'][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), + count) + + return result + + +class BleuScorer(object): + """Bleu scorer.""" + + __slots__ = 'n', 'crefs', 'ctest', '_score', '_ratio', '_testlen', '_reflen', 'special_reflen' + + # special_reflen is used in oracle (proportional effective ref len for a node). 
+ + def copy(self): + """copy the refs.""" + new = BleuScorer(n=self.n) + new.ctest = copy.copy(self.ctest) + new.crefs = copy.copy(self.crefs) + new._score = None + return new + + def __init__(self, test=None, refs=None, n=4, special_reflen=None): + """singular instance.""" + + self.n = n + self.crefs = [] + self.ctest = [] + self.cook_append(test, refs) + self.special_reflen = special_reflen + + def cook_append(self, test, refs): + """called by constructor and __iadd__ to avoid creating new + instances.""" + + if refs is not None: + self.crefs.append(cook_refs(refs)) + if test is not None: + cooked_test = cook_test(test, self.crefs[-1]) + self.ctest.append(cooked_test) ## N.B.: -1 + else: + self.ctest.append( + None) # lens of crefs and ctest have to match + + self._score = None ## need to recompute + + def ratio(self, option=None): + self.compute_score(option=option) + return self._ratio + + def score_ratio(self, option=None): + """return (bleu, len_ratio) pair.""" + return (self.fscore(option=option), self.ratio(option=option)) + + def score_ratio_str(self, option=None): + return '%.4f (%.2f)' % self.score_ratio(option) + + def reflen(self, option=None): + self.compute_score(option=option) + return self._reflen + + def testlen(self, option=None): + self.compute_score(option=option) + return self._testlen + + def retest(self, new_test): + if type(new_test) is str: + new_test = [new_test] + assert len(new_test) == len(self.crefs), new_test + self.ctest = [] + for t, rs in zip(new_test, self.crefs): + self.ctest.append(cook_test(t, rs)) + self._score = None + + return self + + def rescore(self, new_test): + """replace test(s) with new test(s), and returns the new score.""" + + return self.retest(new_test).compute_score() + + def size(self): + assert len(self.crefs) == len( + self.ctest), 'refs/test mismatch! %d<>%d' % (len( + self.crefs), len(self.ctest)) + return len(self.crefs) + + def __iadd__(self, other): + """add an instance (e.g., from another sentence).""" + + if type(other) is tuple: + ## avoid creating new BleuScorer instances + self.cook_append(other[0], other[1]) + else: + assert self.compatible(other), 'incompatible BLEUs.' 
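+ # merge the other scorer's cooked hypotheses and references into this one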
+ self.ctest.extend(other.ctest) + self.crefs.extend(other.crefs) + self._score = None ## need to recompute + + return self + + def compatible(self, other): + return isinstance(other, BleuScorer) and self.n == other.n + + def single_reflen(self, option='average'): + return self._single_reflen(self.crefs[0][0], option) + + def _single_reflen(self, reflens, option=None, testlen=None): + + if option == 'shortest': + reflen = min(reflens) + elif option == 'average': + reflen = float(sum(reflens)) / len(reflens) + elif option == 'closest': + reflen = min((abs(l - testlen), l) for l in reflens)[1] + else: + assert False, 'unsupported reflen option %s' % option + + return reflen + + def recompute_score(self, option=None, verbose=0): + self._score = None + return self.compute_score(option, verbose) + + def compute_score(self, option=None, verbose=0): + n = self.n + small = 1e-9 + tiny = 1e-15 ## so that if guess is 0 still return 0 + bleu_list = [[] for _ in range(n)] + + if self._score is not None: + return self._score + + if option is None: + option = 'average' if len(self.crefs) == 1 else 'closest' + + self._testlen = 0 + self._reflen = 0 + totalcomps = { + 'testlen': 0, + 'reflen': 0, + 'guess': [0] * n, + 'correct': [0] * n + } + + # for each sentence + for comps in self.ctest: + testlen = comps['testlen'] + self._testlen += testlen + + if self.special_reflen is None: ## need computation + reflen = self._single_reflen(comps['reflen'], option, testlen) + else: + reflen = self.special_reflen + + self._reflen += reflen + + for key in ['guess', 'correct']: + for k in range(n): + totalcomps[key][k] += comps[key][k] + + # append per image bleu score + bleu = 1. + for k in range(n): + bleu *= (float(comps['correct'][k]) + tiny) \ + /(float(comps['guess'][k]) + small) + bleu_list[k].append(bleu**(1. / (k + 1))) + ratio = (testlen + tiny) / (reflen + small + ) ## N.B.: avoid zero division + if ratio < 1: + for k in range(n): + bleu_list[k][-1] *= math.exp(1 - 1 / ratio) + + if verbose > 1: + print(comps, reflen) + + totalcomps['reflen'] = self._reflen + totalcomps['testlen'] = self._testlen + + bleus = [] + bleu = 1. + for k in range(n): + bleu *= float(totalcomps['correct'][k] + tiny) \ + / (totalcomps['guess'][k] + small) + bleus.append(bleu**(1. / (k + 1))) + ratio = (self._testlen + tiny) / (self._reflen + small + ) ## N.B.: avoid zero division + if ratio < 1: + for k in range(n): + bleus[k] *= math.exp(1 - 1 / ratio) + + if verbose > 0: + print(totalcomps) + print('ratio:', ratio) + + self._score = bleus + return self._score, bleu_list diff --git a/models/LEO/evaluator/ngram_metrics/cider/cider.py b/models/LEO/evaluator/ngram_metrics/cider/cider.py new file mode 100755 index 0000000..aaf3105 --- /dev/null +++ b/models/LEO/evaluator/ngram_metrics/cider/cider.py @@ -0,0 +1,66 @@ +# Filename: cider.py +# +# Description: Describes the class to compute the CIDEr (Consensus-Based Image Description Evaluation) Metric +# by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726) +# +# Creation Date: Sun Feb 8 14:16:54 2015 +# +# Authors: Ramakrishna Vedantam and Tsung-Yi Lin + +import json +import pdb + +from .cider_scorer import CiderScorer + + +class Cider: + """Main Class to compute the CIDEr metric.""" + + def __init__(self, test=None, refs=None, n=4, sigma=6.0): + # set cider to sum over 1 to 4-grams + self._n = n + # set the standard deviation parameter for gaussian penalty + self._sigma = sigma + + def compute_score(self, gts, res): + """Main function to compute CIDEr score. 
+ + :param hypo_for_image (dict) : dictionary with key and value pred + ref_for_image (dict) : dictionary with key and value gt + :return: cider (float) : computed CIDEr score for the corpus + """ + + assert (gts.keys() == res.keys()) + imgIds = gts.keys() + + cider_scorer = CiderScorer(n=self._n, sigma=self._sigma) + + for id in imgIds: + hypo = res[id] + ref = gts[id] + + # Sanity check. + assert (type(hypo) is list) + assert (len(hypo) >= 1) + assert (type(ref) is list) + assert (len(ref) > 0) + + cider_scorer += (hypo[0], ref) + + (score, scores) = cider_scorer.compute_score() + + return score, scores + + def method(self): + return 'CIDEr' + + +if __name__ == '__main__': + x = Cider() + with open('/home/zhuziyu/work/vlpr/3dVL/scan2cap_result.json', 'r') as f: + json_file = json.load(f) + #print(json_file['gt_sentence_mp']) + #print(x.compute_score({"scene_001": ["This is a chair"], "scene_002": ["That is a book"]}, {"scene_001": ["This is a chair"], "scene_002": ["That is a book"]})) + print( + x.compute_score(json_file['gt_sentence_mp'], + json_file['pred_sentence_mp'])) diff --git a/models/LEO/evaluator/ngram_metrics/cider/cider_scorer.py b/models/LEO/evaluator/ngram_metrics/cider/cider_scorer.py new file mode 100755 index 0000000..b18372f --- /dev/null +++ b/models/LEO/evaluator/ngram_metrics/cider/cider_scorer.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python +# Tsung-Yi Lin +# Ramakrishna Vedantam + +import copy +import math +import pdb +from collections import defaultdict + +import numpy as np + + +def precook(s, n=4, out=False): + """Takes a string as input and returns an object that can be given to + either cook_refs or cook_test. This is optional: cook_refs and cook_test + can take string arguments as well. + + :param s: string : sentence to be converted into ngrams + :param n: int : number of ngrams for which representation is calculated + :return: term frequency vector for occuring ngrams + """ + words = s.split() + counts = defaultdict(int) + for k in range(1, n + 1): + for i in range(len(words) - k + 1): + ngram = tuple(words[i:i + k]) + counts[ngram] += 1 + return counts + + +def cook_refs(refs, n=4): ## lhuang: oracle will call with "average" + """Takes a list of reference sentences for a single segment and returns an + object that encapsulates everything that BLEU needs to know about them. + + :param refs: list of string : reference sentences for some image + :param n: int : number of ngrams for which (ngram) representation is calculated + :return: result (list of dict) + """ + return [precook(ref, n) for ref in refs] + + +def cook_test(test, n=4): + """Takes a test sentence and returns an object that encapsulates everything + that BLEU needs to know about it. 
+ + :param test: list of string : hypothesis sentence for some image + :param n: int : number of ngrams for which (ngram) representation is calculated + :return: result (dict) + """ + return precook(test, n, True) + + +class CiderScorer(object): + """CIDEr scorer.""" + + def copy(self): + """copy the refs.""" + new = CiderScorer(n=self.n) + new.ctest = copy.copy(self.ctest) + new.crefs = copy.copy(self.crefs) + return new + + def __init__(self, test=None, refs=None, n=4, sigma=6.0): + """singular instance.""" + self.n = n + self.sigma = sigma + self.crefs = [] + self.ctest = [] + self.document_frequency = defaultdict(float) + self.cook_append(test, refs) + self.ref_len = None + + def cook_append(self, test, refs): + """called by constructor and __iadd__ to avoid creating new + instances.""" + + if refs is not None: + self.crefs.append(cook_refs(refs)) + if test is not None: + self.ctest.append(cook_test(test)) ## N.B.: -1 + else: + self.ctest.append( + None) # lens of crefs and ctest have to match + + def size(self): + assert len(self.crefs) == len( + self.ctest), 'refs/test mismatch! %d<>%d' % (len( + self.crefs), len(self.ctest)) + return len(self.crefs) + + def __iadd__(self, other): + """add an instance (e.g., from another sentence).""" + + if type(other) is tuple: + ## avoid creating new CiderScorer instances + self.cook_append(other[0], other[1]) + else: + self.ctest.extend(other.ctest) + self.crefs.extend(other.crefs) + + return self + + def compute_doc_freq(self): + """Compute term frequency for reference data. + + This will be used to compute idf (inverse document frequency later) + The term frequency is stored in the object + :return: None + """ + for refs in self.crefs: + # refs, k ref captions of one image + for ngram in set( + [ngram for ref in refs for (ngram, count) in ref.items()]): + self.document_frequency[ngram] += 1 + # maxcounts[ngram] = max(maxcounts.get(ngram,0), count) + + def compute_cider(self): + + def counts2vec(cnts): + """Function maps counts of ngram to vector of tfidf weights. The + function returns vec, an array of dictionary that store mapping of + n-gram and tf-idf weights. The n-th entry of array denotes length + of n-grams. + + :param cnts: + :return: vec (array of dict), norm (array of float), length (int) + """ + vec = [defaultdict(float) for _ in range(self.n)] + length = 0 + norm = [0.0 for _ in range(self.n)] + for (ngram, term_freq) in cnts.items(): + # give word count 1 if it doesn't appear in reference corpus + df = np.log(max(1.0, self.document_frequency[ngram])) + # ngram index + n = len(ngram) - 1 + # tf (term_freq) * idf (precomputed idf) for n-grams + vec[n][ngram] = float(term_freq) * (self.ref_len - df) + # compute norm for the vector. the norm will be used for computing similarity + norm[n] += pow(vec[n][ngram], 2) + + if n == 1: + length += term_freq + norm = [np.sqrt(n) for n in norm] + return vec, norm, length + + def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref): + """Compute the cosine similarity of two vectors. 
+
+            :param vec_hyp: array of dictionary for vector corresponding to hypothesis
+            :param vec_ref: array of dictionary for vector corresponding to reference
+            :param norm_hyp: array of float for vector corresponding to hypothesis
+            :param norm_ref: array of float for vector corresponding to reference
+            :param length_hyp: int containing length of hypothesis
+            :param length_ref: int containing length of reference
+            :return: array of score for each n-grams cosine similarity
+            """
+            delta = float(length_hyp - length_ref)
+            # measure cosine similarity
+            val = np.array([0.0 for _ in range(self.n)])
+            for n in range(self.n):
+                # ngram
+                for (ngram, count) in vec_hyp[n].items():
+                    # vrama91 : added clipping
+                    val[n] += min(vec_hyp[n][ngram],
+                                  vec_ref[n][ngram]) * vec_ref[n][ngram]
+
+                if (norm_hyp[n] != 0) and (norm_ref[n] != 0):
+                    val[n] /= (norm_hyp[n] * norm_ref[n])
+
+                assert (not math.isnan(val[n]))
+                # vrama91: added a length based gaussian penalty
+                val[n] *= np.e**(-(delta**2) / (2 * self.sigma**2))
+            return val
+
+        # compute log reference length
+        self.ref_len = np.log(float(len(self.crefs)))
+
+        scores = []
+        for test, refs in zip(self.ctest, self.crefs):
+            # compute vector for test captions
+            vec, norm, length = counts2vec(test)
+            # compute vector for ref captions
+            score = np.array([0.0 for _ in range(self.n)])
+            for ref in refs:
+                vec_ref, norm_ref, length_ref = counts2vec(ref)
+                score += sim(vec, vec_ref, norm, norm_ref, length,
+                             length_ref)
+            # change by vrama91 - mean of ngram scores, instead of sum
+            score_avg = np.mean(score)
+            # divide by number of references
+            score_avg /= len(refs)
+            # multiply score by 10
+            score_avg *= 10.0
+            # append score of an image to the score list
+            scores.append(score_avg)
+        return scores
+
+    def compute_score(self, option=None, verbose=0):
+        # compute idf
+        self.compute_doc_freq()
+        # assert to check document frequency
+        assert (len(self.ctest) >= max(self.document_frequency.values()))
+        # compute cider score
+        score = self.compute_cider()
+        # debug
+        # print score
+        return np.mean(np.array(score)), np.array(score)
diff --git a/models/LEO/evaluator/ngram_metrics/rouge/__init__.py b/models/LEO/evaluator/ngram_metrics/rouge/__init__.py
new file mode 100755
index 0000000..e69de29
diff --git a/models/LEO/evaluator/ngram_metrics/rouge/rouge.py b/models/LEO/evaluator/ngram_metrics/rouge/rouge.py
new file mode 100755
index 0000000..1914bff
--- /dev/null
+++ b/models/LEO/evaluator/ngram_metrics/rouge/rouge.py
@@ -0,0 +1,122 @@
+#!/usr/bin/env python
+#
+# File Name : rouge.py
+#
+# Description : Computes ROUGE-L metric as described by Lin and Hovy (2004)
+#
+# Creation Date : 2015-01-07 06:03
+# Author : Ramakrishna Vedantam
+
+import json
+import pdb
+
+import numpy as np
+
+
+def my_lcs(string, sub):
+    """Calculates longest common subsequence for a pair of tokenized strings.
+
+    :param string : list of str : tokens from a string split using whitespace
+    :param sub : list of str : shorter string, also split using whitespace
+    :returns: length (int): length of the longest common subsequence between the two strings
+
+    Note: my_lcs only gives the length of the longest common subsequence, not the actual LCS
+    """
+    if (len(string) < len(sub)):
+        sub, string = string, sub
+
+    lengths = [[0 for i in range(0, len(sub) + 1)]
+               for j in range(0, len(string) + 1)]
+
+    for j in range(1, len(sub) + 1):
+        for i in range(1, len(string) + 1):
+            if (string[i - 1] == sub[j - 1]):
+                lengths[i][j] = lengths[i - 1][j - 1] + 1
+            else:
+                lengths[i][j] = max(lengths[i - 1][j], lengths[i][j - 1])
+
+    return lengths[len(string)][len(sub)]
+
+
+class Rouge():
+    """Class for computing ROUGE-L score for a set of candidate sentences for
+    the MS COCO test set."""
+
+    def __init__(self):
+        # vrama91: updated the value below based on discussion with Hovy
+        self.beta = 1.2
+
+    def calc_score(self, candidate, refs):
+        """Compute ROUGE-L score given one candidate and references for an
+        image.
+
+        :param candidate: str : candidate sentence to be evaluated
+        :param refs: list of str : COCO reference sentences for the particular image to be evaluated
+        :returns score: float (ROUGE-L score for the candidate evaluated against references)
+        """
+        # assert(len(candidate)==0)
+        # assert(len(refs)>0)
+        prec = []
+        rec = []
+
+        # split into tokens
+        token_c = candidate[0].split(' ')
+
+        for reference in refs:
+            # split into tokens
+            token_r = reference.split(' ')
+            # compute the longest common subsequence
+            lcs = my_lcs(token_r, token_c)
+            prec.append(lcs / float(len(token_c)))
+            rec.append(lcs / float(len(token_r)))
+
+        prec_max = max(prec)
+        rec_max = max(rec)
+
+        if (prec_max != 0 and rec_max != 0):
+            score = ((1 + self.beta**2) * prec_max *
+                     rec_max) / float(rec_max + self.beta**2 * prec_max)
+        else:
+            score = 0.0
+        return score
+
+    def compute_score(self, gts, res):
+        """Computes ROUGE-L score given a set of reference and candidate
+        sentences for the dataset. Invoked by evaluate_captions.py.
+
+        :param gts: dict : reference sentences with "image name" key and "tokenized sentences" as values
+        :param res: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values
+        :returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images)
+        """
+        assert (gts.keys() == res.keys())
+        imgIds = gts.keys()
+
+        score = []
+        for id in imgIds:
+            hypo = res[id]
+            ref = gts[id]
+
+            score.append(self.calc_score(hypo, ref))
+
+            # Sanity check.
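+            # (calc_score reads hypo[0], so each hypothesis is expected to be
+            # a singleton list holding one tokenized sentence, and each
+            # reference list must be non-empty)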
+ assert (type(hypo) is list) + assert (len(hypo) >= 1) + assert (type(ref) is list) + assert (len(ref) > 0) + + average_score = np.mean(np.array(score)) + return average_score, np.array(score) + + def method(self): + return 'Rouge' + + +if __name__ == '__main__': + x = Rouge() + with open('/home/zhuziyu/work/vlpr/3dVL/scan2cap_result.json', 'r') as f: + json_file = json.load(f) + print( + x.compute_score(json_file['gt_sentence_mp'], + json_file['pred_sentence_mp'])[0]) diff --git a/models/LEO/evaluator/scanqa_eval.py b/models/LEO/evaluator/scanqa_eval.py new file mode 100644 index 0000000..cec5d16 --- /dev/null +++ b/models/LEO/evaluator/scanqa_eval.py @@ -0,0 +1,135 @@ +import json +from pathlib import Path + +import numpy as np +from data.data_utils import clean_answer +from evaluator.build import EVALUATOR_REGISTRY +from evaluator.ngram_metrics.bleu.bleu import Bleu +from evaluator.ngram_metrics.cider.cider import Cider +from evaluator.ngram_metrics.rouge.rouge import Rouge + + +@EVALUATOR_REGISTRY.register() +class ScanQAEvaluator(): + + def __init__(self, cfg, task_name): + self.task_name = task_name + + self.cider_scorer = Cider() + self.bleu_scorer = Bleu() + + self.rouge_scorer = Rouge() + + self.best_result = -np.inf + + self.save_dir = Path(cfg.exp_dir) / 'eval_results' / task_name + self.save_dir.mkdir(parents=True, exist_ok=True) + + self.reset() + + def reset(self): + self.eval_dict = { + 'target_metric': [], + 'em': [], + 'em_refined': [], + 'cider': 0, + 'bleu': 0, + 'rouge': 0, + } + self.total_count = 0 + self.save_results = [] + self.gt_sentence_mp = [] + self.pred_sentence_mp = [] + + def answer_match(self, pred, gts): + # return EM and refined EM + for gt in gts: + if pred == gt: + return 1, 1 + elif ''.join(pred.split()) in ''.join(gt.split()): + return 0, 1 + elif ''.join(gt.split()) in ''.join(pred.split()): + return 0, 1 + return 0, 0 + + def batch_metrics(self, data_dict): + metrics = {} + em = 0 + em_refined = 0 + for answer_pred, answer_gts in zip(data_dict['output_txt'], + data_dict['output_gt']): + answer_pred = clean_answer(answer_pred) + answer_gts = [clean_answer(gt) for gt in answer_gts] + em_flag, em_refined_flag = self.answer_match(pred=answer_pred, + gts=answer_gts) + em += em_flag + em_refined += em_refined_flag + + self.pred_sentence_mp.append([answer_pred]) + self.gt_sentence_mp.append(answer_gts) + + batch_size = len(data_dict['output_gt']) + metrics['total_count'] = batch_size + metrics['em'] = em / batch_size + metrics['em_refined'] = em_refined / batch_size + metrics['target_metric'] = metrics['em_refined'] + return metrics + + def update(self, data_dict): + metrics = self.batch_metrics(data_dict) + batch_size = metrics['total_count'] + self.total_count += batch_size + + for i in range(batch_size): + self.save_results.append({ + # vision + 'source': + data_dict['source'][i], + 'scene_id': + data_dict['scene_id'][i], + 'ID': + data_dict['question_id'][i], + # language + 'instruction': + data_dict['prompt_after_obj'][i], + 'response_gt': + data_dict['output_gt'][i], + 'response_pred': + data_dict['output_txt'][i], + }) + + for key in self.eval_dict.keys(): + if key not in ['cider', 'bleu', 'rouge']: + self.eval_dict[key].append(metrics[key] * batch_size) + + def record(self, split, is_main_process): + # ngram metrics + self.gt_sentence_mp = {k: v for k, v in enumerate(self.gt_sentence_mp)} + self.pred_sentence_mp = { + k: v + for k, v in enumerate(self.pred_sentence_mp) + } + + self.eval_dict['cider'] = self.cider_scorer.compute_score( + 
self.gt_sentence_mp, self.pred_sentence_mp)[0] + self.eval_dict['bleu'] = self.bleu_scorer.compute_score( + self.gt_sentence_mp, self.pred_sentence_mp)[0][-1] + self.eval_dict['rouge'] = self.rouge_scorer.compute_score( + self.gt_sentence_mp, self.pred_sentence_mp)[0] + + # others + for k, v in self.eval_dict.items(): + if k not in ['cider', 'bleu', 'rouge']: + self.eval_dict[k] = sum(v) / self.total_count + + if self.eval_dict['target_metric'] > self.best_result: + is_best = True + self.best_result = self.eval_dict['target_metric'] + else: + is_best = False + + if (is_best or split == 'test') and is_main_process: + with open(str(self.save_dir / 'results.json'), 'w') as f: + json.dump(self.save_results, f, indent=2) + + return is_best, self.eval_dict diff --git a/models/LEO/evaluator/sqa3d_eval.py b/models/LEO/evaluator/sqa3d_eval.py new file mode 100644 index 0000000..ca4a460 --- /dev/null +++ b/models/LEO/evaluator/sqa3d_eval.py @@ -0,0 +1,167 @@ +import json + +from data.data_utils import clean_answer +from evaluator.build import EVALUATOR_REGISTRY +from evaluator.scanqa_eval import ScanQAEvaluator + + +@EVALUATOR_REGISTRY.register() +class SQA3DEvaluator(ScanQAEvaluator): + + def reset(self): + self.eval_dict = { + 'target_metric': [], + 'em_overall': [], + 'em_refined_overall': [], + 'em_type0': [], + 'em_refined_type0': [], + 'em_type1': [], + 'em_refined_type1': [], + 'em_type2': [], + 'em_refined_type2': [], + 'em_type3': [], + 'em_refined_type3': [], + 'em_type4': [], + 'em_refined_type4': [], + 'em_type5': [], + 'em_refined_type5': [], + 'cider_overall': 0, + 'bleu_overall': 0, + 'rouge_overall': 0, + } + self.total_count = 0 + self.type_count = { + 0: 1e-10, + 1: 1e-10, + 2: 1e-10, + 3: 1e-10, + 4: 1e-10, + 5: 1e-10 + } + self.save_results = [] + self.gt_sentence_mp = [] + self.pred_sentence_mp = [] + + def batch_metrics(self, data_dict): + metrics = { + 'type0_count': 1e-10, + 'type1_count': 1e-10, + 'type2_count': 1e-10, + 'type3_count': 1e-10, + 'type4_count': 1e-10, + 'type5_count': 1e-10, + } + + em_overall = 0 + em_refined_overall = 0 + em_type = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0} + em_refined_type = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0} + + for answer_pred, answer_gts, sqa_type in zip(data_dict['output_txt'], + data_dict['output_gt'], + data_dict['sqa_type']): + answer_pred = clean_answer(answer_pred) + answer_gts = [clean_answer(gt) for gt in answer_gts] + em_flag, em_refined_flag = self.answer_match(pred=answer_pred, + gts=answer_gts) + em_overall += em_flag + em_refined_overall += em_refined_flag + + sqa_type = int(sqa_type) # 0-dim tensor to int + em_type[sqa_type] += em_flag + em_refined_type[sqa_type] += em_refined_flag + metrics[f'type{sqa_type}_count'] += 1 + + self.pred_sentence_mp.append([answer_pred]) + self.gt_sentence_mp.append(answer_gts) + + batch_size = len(data_dict['output_gt']) + metrics['total_count'] = batch_size + metrics['em_overall'] = em_overall / batch_size + metrics['em_refined_overall'] = em_refined_overall / batch_size + for key in em_type.keys(): + metrics[ + f'em_type{key}'] = em_type[key] / metrics[f'type{key}_count'] + metrics[f'em_refined_type{key}'] = em_refined_type[key] / metrics[ + f'type{key}_count'] + + metrics['target_metric'] = metrics['em_refined_overall'] + return metrics + + def update(self, data_dict): + metrics = self.batch_metrics(data_dict) + batch_size = metrics['total_count'] + self.total_count += batch_size + for key in metrics.keys(): + if 'type' in key and 'count' in key: + # type{x}_count + 
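+                # accumulate the per-type question counts across batches; the
+                # tiny 1e-10 initialisation keeps the divisions in record()
+                # safe for question types that never occur in this split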
self.type_count[int(key[4])] += metrics[key] + + for i in range(batch_size): + self.save_results.append({ + # vision + 'source': + data_dict['source'][i], + 'scene_id': + data_dict['scene_id'][i], + 'anchor': + data_dict['anchor_locs'][i].tolist(), + 'anchor_ort': + data_dict['anchor_orientation'][i].tolist(), + # language + 'situation': + data_dict['situation'][i], + 'instruction': + data_dict['prompt_after_obj'][i], + 'response_gt': + data_dict['output_gt'][i], + 'response_pred': + data_dict['output_txt'][i], + }) + + # save eval dict + for key in self.eval_dict.keys(): + if key in ['cider_overall', 'bleu_overall', 'rouge_overall']: + continue + if 'type' in key: + self.eval_dict[key].append(metrics[key] * + metrics[f'type{key[-1]}_count']) + else: + self.eval_dict[key].append(metrics[key] * batch_size) + + def record(self, split, is_main_process): + # ngram metrics + self.gt_sentence_mp = {k: v for k, v in enumerate(self.gt_sentence_mp)} + self.pred_sentence_mp = { + k: v + for k, v in enumerate(self.pred_sentence_mp) + } + + self.eval_dict['cider_overall'] = self.cider_scorer.compute_score( + self.gt_sentence_mp, self.pred_sentence_mp)[0] + self.eval_dict['bleu_overall'] = self.bleu_scorer.compute_score( + self.gt_sentence_mp, self.pred_sentence_mp)[0][-1] + + self.eval_dict['rouge_overall'] = self.rouge_scorer.compute_score( + self.gt_sentence_mp, self.pred_sentence_mp)[0] + + # others + for k, v in self.eval_dict.items(): + if k in ['cider_overall', 'bleu_overall', 'rouge_overall']: + continue + if 'type' in k: + self.eval_dict[k] = sum(v) / self.type_count[int(k[-1])] + else: + self.eval_dict[k] = sum(v) / self.total_count + + if self.eval_dict['target_metric'] > self.best_result: + is_best = True + self.best_result = self.eval_dict['target_metric'] + else: + is_best = False + + if (is_best or split == 'test') and is_main_process: + with open(str(self.save_dir / 'results.json'), 'w') as f: + json.dump(self.save_results, f, indent=2) + + return is_best, self.eval_dict diff --git a/models/LEO/inference.py b/models/LEO/inference.py new file mode 100644 index 0000000..d613df2 --- /dev/null +++ b/models/LEO/inference.py @@ -0,0 +1,226 @@ +import json +import os +from datetime import datetime +from math import ceil + +import common.io_utils as iu +import hydra +import numpy as np +import torch +from accelerate import Accelerator +from accelerate.logging import get_logger +from accelerate.utils import set_seed +from common.misc import rgetattr +from data.data_utils import pad_tensors +from data.datasets import LeoBase +from model.leo_agent import LeoAgent +from tqdm import trange +from trainer.leo_trainer import LeoTrainer + +logger = get_logger(__name__) +device = 'cuda' if torch.cuda.is_available() else 'cpu' + + +class LeoProber(LeoTrainer, LeoBase): + + def __init__(self, cfg): + set_seed(cfg.rng_seed) + self.exp_dir = cfg.exp_dir + self.rscan_base = cfg.data.rscan_base + self.scannet_base = cfg.data.scan_family_base + self.num_points = cfg.data.num_points + self.max_obj_len = cfg.data.max_obj_len + self.batch_size = cfg.dataloader.eval.batchsize + self.split = 'test' + self.save_obj_tokens = cfg.probe.save_obj_tokens + + # dummpy accelerator + self.accelerator = Accelerator() + + # load model + self.model = LeoAgent(cfg) + self.model.to(device) + self.model.eval() + + self_best_ckpt = os.path.join(self.exp_dir, 'best.pth') + if os.path.exists(self_best_ckpt): + self.pretrained_ckpt_path = self_best_ckpt + elif cfg.pretrained_ckpt_path and os.path.exists( + cfg.pretrained_ckpt_path): + 
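+            # fall back to the checkpoint path from the config when the
+            # experiment directory contains no best.pth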
self.pretrained_ckpt_path = cfg.pretrained_ckpt_path + else: + raise ValueError('No checkpoint to load for evaluation') + + logger.info(f'Probe: load model from {self.pretrained_ckpt_path}') + self.load(path=self.pretrained_ckpt_path, model_only=True) + + # prepare data + self.sources = [cfg.probe.sources] if isinstance( + cfg.probe.sources, str) else list(cfg.probe.sources) + self.scene_ids = [cfg.probe.scene_ids] if isinstance( + cfg.probe.scene_ids, str) else list(cfg.probe.scene_ids) + self.situations = [cfg.probe.situations] if isinstance( + cfg.probe.situations, str) else list(cfg.probe.situations) + self.instructions = [cfg.probe.instructions] if isinstance( + cfg.probe.instructions, str) else list(cfg.probe.instructions) + + self.num_samples = max(len(self.sources), len(self.scene_ids), + len(self.situations), len(self.instructions)) + if len(self.sources) == 1: + self.sources = self.sources * self.num_samples + if len(self.scene_ids) == 1: + self.scene_ids = self.scene_ids * self.num_samples + if len(self.situations) == 1: + self.situations = self.situations * self.num_samples + if len(self.instructions) == 1: + self.instructions = self.instructions * self.num_samples + + assert len(self.sources) == len(self.scene_ids) == len( + self.situations) == len(self.instructions) + + self.data_dict = { + 'source': + self.sources, + 'scene_id': + self.scene_ids, + 'prompt_before_obj': [ + self.role_prompt + self.situation_prompt.format(situation=s) + for s in self.situations + ], + 'prompt_middle_1': [self.egoview_prompt] * self.num_samples, + 'prompt_middle_2': [self.objects_prompt] * self.num_samples, + 'prompt_after_obj': [], + 'obj_fts': [], + 'obj_masks': [], + 'obj_locs': [], + 'anchor_locs': + torch.zeros(self.num_samples, 3, device=device), + 'img_fts': + torch.zeros(self.num_samples, 3, 224, 224, device=device), + 'img_masks': + torch.zeros(self.num_samples, 1, dtype=torch.bool, device=device), + } + for instruction in self.instructions: + if 'USER:' in instruction: + # dialogue + self.data_dict['prompt_after_obj'].append(instruction) + else: + # single question + self.data_dict['prompt_after_obj'].append( + self.task_prompt.format(instruction=instruction)) + + anchor_orient = torch.zeros(self.num_samples, 4, device=device) + anchor_orient[:, -1] = 1 + self.data_dict['anchor_orientation'] = anchor_orient + + # load scene + for source, scene_id in zip(self.sources, self.scene_ids): + obj_fts, obj_masks, obj_locs = self.load_scene(source, scene_id) + self.data_dict['obj_fts'].append(obj_fts) + self.data_dict['obj_masks'].append(obj_masks) + self.data_dict['obj_locs'].append(obj_locs) + + self.data_dict['obj_fts'] = torch.stack( + self.data_dict['obj_fts']).to(device) + self.data_dict['obj_masks'] = torch.stack( + self.data_dict['obj_masks']).to(device) + self.data_dict['obj_locs'] = torch.stack( + self.data_dict['obj_locs']).to(device) + + self.save_dir = os.path.join(self.exp_dir, 'probe') + iu.make_dir(self.save_dir) + self.log_path = os.path.join(self.save_dir, 'results.json') + if os.path.exists(self.log_path): + with open(self.log_path, 'r') as f: + self.log = json.load(f) + else: + self.log = {} + if self.pretrained_ckpt_path not in self.log: + self.log[self.pretrained_ckpt_path] = [] + + def load_scene(self, source, scene_id): + if source.lower() in ['3rscan', 'scannet']: + if source.lower() == '3rscan': + obj_pcds = self.load_rscan(scene_id)['obj_pcds'] + elif source.lower() == 'scannet': + obj_pcds = self.load_scannet(scene_id)['obj_pcds'] + selected_obj_pcds = 
list(obj_pcds.values())[:self.max_obj_len]
+        elif source.lower() == 'objaverse':
+            raise NotImplementedError
+        elif source.lower() in ['mp3d', 'hm3d']:
+            raise NotImplementedError
+        elif source.lower() in ['cliport', 'arnold']:
+            raise NotImplementedError
+        else:
+            raise ValueError(f'Unsupported source: {source}')
+
+        obj_fts, obj_locs, _ = self.preprocess_pcd(selected_obj_pcds,
+                                                   return_anchor=False)
+        obj_fts = pad_tensors(obj_fts, lens=self.max_obj_len,
+                              pad=1.0).float()  # O, num_points, 6
+        obj_masks = (torch.arange(self.max_obj_len) < len(obj_locs))  # O
+        obj_locs = pad_tensors(obj_locs, lens=self.max_obj_len,
+                               pad=0.0).float()  # O, 6
+        return obj_fts, obj_masks, obj_locs
+
+    @torch.no_grad()
+    def run(self):
+        for i in trange(ceil(self.num_samples / self.batch_size)):
+            batch_data_dict = {}
+            for k in self.data_dict.keys():
+                batch_data_dict[k] = self.data_dict[k][self.batch_size *
+                                                       i:self.batch_size *
+                                                       (i + 1)]
+            output = self.forward(batch_data_dict, inference=True)
+            for j in range(self.batch_size):
+                idx = self.batch_size * i + j
+                if idx >= self.num_samples:
+                    break
+                response_log = {
+                    'source': self.sources[idx],
+                    'scene_id': self.scene_ids[idx],
+                    'situation': self.situations[idx],
+                    'instruction': self.instructions[idx],
+                    'response': output['output_txt'][j],
+                }
+                logger.info(response_log)
+                self.log[self.pretrained_ckpt_path].append(response_log)
+
+                if self.save_obj_tokens:
+                    torch.save(
+                        {
+                            'obj_tokens':
+                            output['obj_tokens'][j].unsqueeze(0).cpu(),
+                            'obj_masks':
+                            output['obj_masks'][j].unsqueeze(0).cpu(),
+                        },
+                        os.path.join(
+                            self.save_dir,
+                            f'{self.sources[idx]}-{self.scene_ids[idx]}.pth'))
+
+        with open(self.log_path, 'w') as f:
+            json.dump(self.log, f, indent=2)
+
+
+@hydra.main(config_path='configs', config_name='default', version_base=None)
+def main(cfg):
+    naming_keys = [cfg.name]
+    for name in cfg.naming_keywords:
+        key = str(rgetattr(cfg, name))
+        if key:
+            naming_keys.append(key)
+    exp_name = '_'.join(naming_keys)
+
+    # Record the experiment
+    cfg.exp_dir = os.path.join(
+        cfg.base_dir, exp_name,
+        f"{datetime.now().strftime('%Y-%m-%d-%H:%M:%S')}"
+        if 'time' in cfg.naming_keywords else '')
+    iu.make_dir(cfg.exp_dir)
+
+    prober = LeoProber(cfg)
+    prober.run()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/models/LEO/launch.py b/models/LEO/launch.py
new file mode 100755
index 0000000..2f8fddc
--- /dev/null
+++ b/models/LEO/launch.py
@@ -0,0 +1,110 @@
+import argparse
+from datetime import datetime
+
+import common.launch_utils as lu
+
+
+def parse_args():
+
+    def str2bool(v):
+        if v.lower() in ('yes', 'true', 't', 'y', '1'):
+            return True
+        elif v.lower() in ('no', 'false', 'f', 'n', '0'):
+            return False
+        else:
+            raise argparse.ArgumentTypeError('Unsupported value encountered')
+
+    parser = argparse.ArgumentParser()
+
+    # General settings
+    parser.add_argument('--mode',
+                        default='submitit',
+                        type=str,
+                        help='Launch mode (submitit | accelerate | python)')
+    parser.add_argument('--debug',
+                        default=False,
+                        type=str2bool,
+                        help='Debug mode (True | False)')
+
+    # Slurm settings
+    parser.add_argument('--name',
+                        default='leo',
+                        type=str,
+                        help='Name of the job')
+    parser.add_argument('--run_file',
+                        default='run.py',
+                        type=str,
+                        help='Path to the launcher file')
+    parser.add_argument('--job_dir',
+                        default='jobs/%j',
+                        type=str,
+                        help='Directory to save the job logs')
+    parser.add_argument('--num_nodes',
+                        default=1,
+                        type=int,
+                        help='Number of nodes to use in SLURM')
+    parser.add_argument('--gpu_per_node',
+                        default=4,
+                        type=int,
+                        help='Number of gpus to use in each node')
+    parser.add_argument('--cpu_per_task',
+                        default=32,
+                        type=int,
+                        help='Number of cpus to use for each gpu')
+    parser.add_argument('--qos',
+                        default='lv0b',
+                        type=str,
+                        help='Qos of the job')
+    parser.add_argument('--partition',
+                        default='HGX',
+                        type=str,
+                        help='Partition of the job')
+    parser.add_argument('--account',
+                        default='research',
+                        type=str,
+                        help='Account of the job')
+    parser.add_argument('--mem_per_gpu',
+                        default=100,
+                        type=int,
+                        help='Memory allocated for each gpu in GB')
+    parser.add_argument('--time',
+                        default=24,
+                        type=int,
+                        help='Time allocated for the job in hours')
+    parser.add_argument('--port',
+                        default=1234,
+                        type=int,
+                        help='Default port for distributed training')
+    parser.add_argument('--nodelist',
+                        default='',
+                        type=str,
+                        help='Default node id for distributed training')
+
+    # Accelerate settings
+    parser.add_argument(
+        '--mixed_precision',
+        default='no',
+        type=str,
+        help='Mixed precision training, options (no | fp16 | bf16)')
+
+    # Additional Training settings
+    parser.add_argument('--config',
+                        default='configs/default.yaml',
+                        type=str,
+                        help='Path to the config file')
+    parser.add_argument('opts',
+                        default=None,
+                        nargs=argparse.REMAINDER,
+                        help='Additional options to change configuration')
+    return parser.parse_args()
+
+
+def main():
+    args = parse_args()
+    getattr(lu, f'{args.mode}_launch')(args)
+    print(
+        f"[{datetime.now().strftime('%Y-%m-%d %H:%M:%S,%f')[:-3]}] - Launched")
+
+
+if __name__ == '__main__':
+    main()
diff --git a/models/LEO/model/__init__.py b/models/LEO/model/__init__.py
new file mode 100755
index 0000000..fc14b82
--- /dev/null
+++ b/models/LEO/model/__init__.py
@@ -0,0 +1,3 @@
+from .leo_agent import LeoAgent
+from .vision2d import GridFeatureExtractor2D
+from .vision3d import OSE3D
diff --git a/models/LEO/model/build.py b/models/LEO/model/build.py
new file mode 100755
index 0000000..bb50261
--- /dev/null
+++ b/models/LEO/model/build.py
@@ -0,0 +1,7 @@
+from fvcore.common.registry import Registry
+
+MODULE_REGISTRY = Registry('Module')
+
+
+def build_module(cfg):
+    return MODULE_REGISTRY.get(cfg.name)(cfg)
diff --git a/models/LEO/model/leo_agent.py b/models/LEO/model/leo_agent.py
new file mode 100644
index 0000000..d5c7ecd
--- /dev/null
+++ b/models/LEO/model/leo_agent.py
@@ -0,0 +1,558 @@
+import math
+
+import clip
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+from accelerate.logging import get_logger
+from einops import rearrange
+from model.build import build_module
+from model.utils import disabled_train, maybe_autocast
+from peft import LoraConfig, get_peft_model
+from transformers import (AutoModelForCausalLM, AutoTokenizer,
+                          LlamaForCausalLM, LlamaTokenizer)
+
+logger = get_logger(__name__)
+
+
+class LeoAgent(nn.Module):
+
+    def __init__(self, cfg):
+        super().__init__()
+
+        # LLM
+        if 'vicuna' in cfg.llm.name.lower():
+            self.llm_tokenizer = LlamaTokenizer.from_pretrained(
+                cfg.llm.cfg_path, truncation_side=cfg.llm.truncation_side)
+            self.llm_tokenizer.add_special_tokens({'pad_token': '[PAD]'})
+            self.llm_model = LlamaForCausalLM.from_pretrained(
+                cfg.llm.cfg_path, torch_dtype=torch.float16)
+            self.llm_model.resize_token_embeddings(len(self.llm_tokenizer))
+        else:
+            self.llm_tokenizer = AutoTokenizer.from_pretrained(
+                cfg.llm.cfg_path, truncation_side=cfg.llm.truncation_side)
+            self.llm_model = AutoModelForCausalLM.from_pretrained(
+                cfg.llm.cfg_path, torch_dtype=torch.float16)
+
+        logger.info(f'Build {cfg.llm.name} from {cfg.llm.cfg_path}')
+
+        for param in self.llm_model.parameters():
+            param.requires_grad = False
+        self.llm_model.eval()
+        self.llm_model.train = disabled_train
+        logger.info('Freeze LLM')
+
+        # 2D vision
+        self.img_encoder = build_module(cfg.vision2d)
+        self.img_proj = nn.Linear(self.img_encoder.out_channels,
+                                  self.llm_model.config.hidden_size)
+
+        # 3D vision
+        self.pcd_encoder = build_module(cfg.vision3d)
+        self.pcd_proj = nn.Linear(cfg.vision3d.hidden_dim,
+                                  self.llm_model.config.hidden_size)
+
+        # type embedding
+        # self.img_type_embed = nn.Parameter(torch.zeros(self.llm_model.config.hidden_size), requires_grad=True)
+        # self.pcd_type_embed = nn.Parameter(torch.zeros(self.llm_model.config.hidden_size), requires_grad=True)
+
+        # LoRA
+        if cfg.llm.lora.flag:
+            logger.info(f'Apply LoRA with configs: {cfg.llm.lora}')
+            lora_config = LoraConfig(
+                r=cfg.llm.lora.rank,
+                lora_alpha=cfg.llm.lora.alpha,
+                target_modules=cfg.llm.lora.target_modules,
+                lora_dropout=cfg.llm.lora.dropout,
+                bias='none',
+                modules_to_save=[],
+            )
+            self.llm_model = get_peft_model(self.llm_model,
+                                            peft_config=lora_config)
+
+        self.max_context_len = cfg.llm.max_context_len
+        self.max_out_len = cfg.llm.max_out_len
+
+        # additional text x multi-modal tokens fusion
+        self.clip_txt_guidance = cfg.clip_txt_guidance.flag
+        if self.clip_txt_guidance:
+            logger.info('Add CLIP semantics guidance')
+            self.clip_model = clip.load('RN50')[0]
+            for param in self.clip_model.parameters():
+                param.requires_grad = False
+            self.clip_model.eval()
+            self.clip_model.train = disabled_train
+            self.clip_proj = nn.Linear(cfg.clip_txt_guidance.clip_out_dim,
+                                       self.llm_model.config.hidden_size)
+
+    @property
+    def device(self):
+        return list(self.parameters())[0].device
+
+    def count_params(self, parameters):
+        tot = sum([math.prod(p.shape) for p in parameters])
+        return tot
+
+    def show_params_size(self, tot):
+        if tot >= 1e9:
+            return '{:.1f}B'.format(tot / 1e9)
+        elif tot >= 1e6:
+            return '{:.1f}M'.format(tot / 1e6)
+        else:
+            return '{:.1f}k'.format(tot / 1e3)
+
+    def get_learnable_named_params(self):
+        learnable_named_params = {}
+        frozen_named_params = {}
+        for n, p in self.named_parameters():
+            if p.requires_grad:
+                learnable_named_params.update({n: p})
+            else:
+                frozen_named_params.update({n: p})
+        learnable_params_size = self.count_params(
+            learnable_named_params.values())
+        frozen_params_size = self.count_params(frozen_named_params.values())
+        logger.info(
+            f'Build LEO with {self.show_params_size(learnable_params_size + frozen_params_size)} parameters, '
+            f'{self.show_params_size(learnable_params_size)} learnable and '
+            f'{self.show_params_size(frozen_params_size)} frozen')
+        logger.info(f'🧊 Frozen parameters: {list(frozen_named_params.keys())}')
+        logger.info(
+            f'🔥 Tuned parameters: {list(learnable_named_params.keys())}')
+
+        return learnable_named_params
+
+    def build_right_justified_sequence(self, data_dict):
+        """Concat six sequences: `prompt_before_obj`, `prompt_middle_1`,
+        `img_tokens`, `prompt_middle_2`, `obj_tokens`, `prompt_after_obj`.
+
+        Return a right-justified sequence for the causal LM: left padding
+        comes first, then the six sequences above concatenated in order.
+ """ + device = self.device + bs = len(data_dict['prompt_before_obj']) + + self.llm_tokenizer.padding_side = 'left' + text_input_tokens_pre = self.llm_tokenizer( + data_dict['prompt_before_obj'], + return_tensors='pt', + padding='longest').to(device) # [PAD, BOS, tokens], (B, T1) + + text_input_tokens_mid1 = self.llm_tokenizer( + data_dict['prompt_middle_1'], + return_tensors='pt', + padding='longest').to(device) + + img_tokens = data_dict['img_tokens'].to(device) + img_masks = data_dict['img_masks'].to(device) + img_masks = img_masks.reshape(-1, 1).repeat(1, img_tokens.size(1)) + + text_input_tokens_mid2 = self.llm_tokenizer( + data_dict['prompt_middle_2'], + return_tensors='pt', + padding='longest').to(device) + + obj_tokens = data_dict['obj_tokens'].to(device) + obj_masks = data_dict['obj_masks'].to(device) + + # additional clip fusion + if self.clip_txt_guidance: + with torch.no_grad(): + clip_fts = self.clip_model.encode_text( + clip.tokenize(data_dict['prompt_after_obj'], + truncate=True).to(device)) + clip_fts = self.clip_proj(clip_fts.to(obj_tokens.dtype)) + # B, N, C + img_tokens = torch.einsum('bnc,bc->bnc', img_tokens, clip_fts) + obj_tokens = torch.einsum('bnc,bc->bnc', obj_tokens, clip_fts) + + self.llm_tokenizer.padding_side = 'right' # no need to be 'left', as padding tokens will be shifted + self.llm_tokenizer.truncation_side = 'left' # truncate history + text_input_tokens_post = self.llm_tokenizer( + data_dict['prompt_after_obj'], + return_tensors='pt', + padding='longest', + truncation=True, + max_length=self.max_context_len, + ).to(device) # [BOS, tokens, PAD], (B, T3) + + assert text_input_tokens_mid1.attention_mask.all() and text_input_tokens_mid2.attention_mask.all(), \ + 'prompt_middle should be the same and thus no padding' + + # remove bos, make "tokenize subseq and concat" equivalent to "tokenize the whole seq" + text_input_tokens_mid1.input_ids = text_input_tokens_mid1.input_ids[:, + 1:] + text_input_tokens_mid1.attention_mask = text_input_tokens_mid1.attention_mask[:, + 1:] + text_input_tokens_mid2.input_ids = text_input_tokens_mid2.input_ids[:, + 1:] + text_input_tokens_mid2.attention_mask = text_input_tokens_mid2.attention_mask[:, + 1:] + text_input_tokens_post.input_ids = text_input_tokens_post.input_ids[:, + 1:] + text_input_tokens_post.attention_mask = text_input_tokens_post.attention_mask[:, + 1:] + for i in range(bs): + if not img_masks[i].any(): + # no image input, also mask the text prompt for image tokens + text_input_tokens_mid1.attention_mask[i].fill_(0) + + inputs_embeds_pre = self.llm_model.get_input_embeddings()( + text_input_tokens_pre.input_ids) + inputs_embeds_mid1 = self.llm_model.get_input_embeddings()( + text_input_tokens_mid1.input_ids) + inputs_embeds_mid2 = self.llm_model.get_input_embeddings()( + text_input_tokens_mid2.input_ids) + inputs_embeds_post = self.llm_model.get_input_embeddings()( + text_input_tokens_post.input_ids) + + # since img_tokens, prompt_mid, obj_tokens are fixed length without padding, we concat them first + inputs_embeds_mid = torch.cat( + [inputs_embeds_mid1, img_tokens, inputs_embeds_mid2, obj_tokens], + dim=1) + attn_mask_mid = torch.cat( + [ + text_input_tokens_mid1.attention_mask, img_masks, + text_input_tokens_mid2.attention_mask, obj_masks + ], + dim=1, + ) + + post_pad_length = torch.logical_not( + text_input_tokens_post.attention_mask).sum(-1) + + bs, l1, hidden_dim = inputs_embeds_pre.shape + _, l2, _ = inputs_embeds_mid.shape + _, l3, _ = inputs_embeds_post.shape + + inputs_embeds = torch.zeros(bs, l1 + 
l2 + l3, hidden_dim).type( + inputs_embeds_pre.dtype).to(device) + attention_mask = torch.zeros(bs, l1 + l2 + l3).type( + obj_masks.dtype).to(device) + + # assign by chunks + for i in range(bs): + post_pad_len = post_pad_length[i] + + if post_pad_len > 0: + inputs_embeds[i, :post_pad_len] = inputs_embeds_post[ + i, -post_pad_len:] + attention_mask[i, :post_pad_len] = 0 + inputs_embeds[i, post_pad_len + l1 + + l2:] = inputs_embeds_post[i, :-post_pad_len] + attention_mask[i, post_pad_len + l1 + l2:] = 1 + else: + # no padding + inputs_embeds[i, -l3:] = inputs_embeds_post[i] + attention_mask[i, -l3:] = 1 + + inputs_embeds[i, post_pad_len:post_pad_len + + l1] = inputs_embeds_pre[i] + attention_mask[i, post_pad_len:post_pad_len + + l1] = text_input_tokens_pre.attention_mask[i] + + inputs_embeds[i, post_pad_len + l1:post_pad_len + l1 + + l2] = inputs_embeds_mid[i] + attention_mask[i, post_pad_len + l1:post_pad_len + l1 + + l2] = attn_mask_mid[i] + + return inputs_embeds, attention_mask + + def forward(self, data_dict): + """data_dict requires keys: + + # input + prompt_before_obj: list of str, (B,) + prompt_middle_1: list of str, (B,) + prompt_middle_2: list of str, (B,) + prompt_after_obj: list of str, (B,) + obj_fts: (B, N, P, 6), xyz + rgb + obj_masks: (B, N), 1 valid and 0 masked + obj_locs: (B, N, 6), xyz + whd + anchor_locs: (B, 3) + anchor_orientation: (B, C) + img_fts: (B, 3, H, W), rgb + img_masks: (B, 1), 1 valid and 0 masked + # output + output_gt: list of str, (B,) + """ + device = self.device + bs = len(data_dict['prompt_after_obj']) + if 'obj_tokens' not in data_dict: + + data_dict = self.pcd_encoder(data_dict) + + data_dict['obj_tokens'] = self.pcd_proj( + data_dict['obj_tokens'].to(device)) + # data_dict['obj_tokens'] = data_dict['obj_tokens'] + self.pcd_type_embed + + data_dict['img_tokens'] = self.img_proj( + self.img_encoder(data_dict['img_fts'])) + # data_dict['img_tokens'] = data_dict['img_tokens'] + self.img_type_embed + + inputs_embeds, attention_mask = self.build_right_justified_sequence( + data_dict=data_dict) + # (B, T1+O+T2, D), (B, T1+O+T2) + + self.llm_tokenizer.padding_side = 'right' + self.llm_tokenizer.truncation_side = 'right' + text_output_tokens = self.llm_tokenizer( + [t + self.llm_tokenizer.eos_token for t in data_dict['output_gt']], + return_tensors='pt', + padding='longest', + truncation=True, + max_length=self.max_out_len, + ).to(device) + + text_output_embeds = self.llm_model.get_input_embeddings()( + text_output_tokens.input_ids) # (B, T3, D) + inputs_embeds = torch.cat([inputs_embeds, text_output_embeds], + dim=1) # (B, T1+O+T2+T3, D) + attention_mask = torch.cat( + [attention_mask, text_output_tokens.attention_mask], + dim=1) # (B, T1+O+T2+T3) + + # construct targets + targets = torch.zeros_like(attention_mask).long().fill_( + -100) # (B, T1+O+T2+T3) + + # only apply loss to answer tokens + targets_idx = text_output_tokens.attention_mask.bool() + targets[:, -targets_idx.shape[1]:][ + targets_idx] = text_output_tokens.input_ids[targets_idx] + + # do not predict bos token, regard it as condition instead + targets[:, -targets_idx.shape[1]] = -100 + + with maybe_autocast(self): + outputs = self.llm_model( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + return_dict=True, + output_hidden_states=True, + ) + + logits = outputs.logits.float() + + # different from the loss inside `llm_model.forward`, here we take mean of each sequence instead of sum + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = targets[..., 1:].contiguous() 
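+        # standard causal-LM shift: the logit at position t is trained to
+        # predict the token at position t + 1, so drop the last logit and the
+        # first label before the token-level cross-entropy below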
+ num_tokens_for_loss = (shift_labels >= 0).int().sum(1) # (B,) + + shift_logits = rearrange(shift_logits, 'b t v -> (b t) v') + shift_labels = rearrange(shift_labels, 'b t -> (b t)') + + shift_labels = shift_labels.to(shift_logits.device) + loss = F.cross_entropy(shift_logits, shift_labels, reduction='none') + loss = rearrange(loss, '(b t) -> b t', b=bs) + loss = loss.sum(1) / num_tokens_for_loss # (B,) + + data_dict.update({'loss': loss}) + # do not average loss, average txt and eai respectively in Trainer.train_step() instead + return data_dict + + @torch.no_grad() + def generate( + self, + data_dict, + use_nucleus_sampling=False, + num_beams=5, + max_length=256, + min_length=1, + top_p=0.9, + repetition_penalty=3.0, + length_penalty=1, + num_captions=1, + temperature=1, + ): + """data_dict requires the same keys as forward() except output_gt.""" + device = self.device + bs = len(data_dict['prompt_after_obj']) + if 'obj_tokens' not in data_dict: + # obtain obj tokens + data_dict = self.pcd_encoder(data_dict) + + data_dict['obj_tokens'] = self.pcd_proj( + data_dict['obj_tokens'].to(device)) + # data_dict['obj_tokens'] = data_dict['obj_tokens'] + self.pcd_type_embed + + data_dict['img_tokens'] = self.img_proj( + self.img_encoder(data_dict['img_fts'])) + # data_dict['img_tokens'] = data_dict['img_tokens'] + self.img_type_embed + + inputs_embeds, attention_mask = self.build_right_justified_sequence( + data_dict=data_dict) + + # give bos token as condition + bos_tokens = self.llm_tokenizer( + [self.llm_tokenizer.bos_token] * bs, + return_tensors='pt', + ).to(device) + bos_tokens_ids = bos_tokens.input_ids[:, 0:1] # (B, 1) + bos_tokens_attn = bos_tokens.attention_mask[:, 0:1] # (B, 1) + + # prepare a `bos_token` + bos_embeds = self.llm_model.get_input_embeddings()( + bos_tokens_ids) # (B, 1, D) + inputs_embeds = torch.cat([inputs_embeds, bos_embeds], + dim=1) # (B, T1+O+T2+1, D) + attention_mask = torch.cat([attention_mask, bos_tokens_attn], + dim=1) # (B, T1+O+T2+1) + + with maybe_autocast(self): + outputs = self.llm_model.generate( + inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + do_sample=use_nucleus_sampling, + top_p=top_p, + temperature=temperature, + num_beams=num_beams, + max_length=max_length, + min_length=min_length, + repetition_penalty=repetition_penalty, + length_penalty=length_penalty, + num_return_sequences=num_captions, + ) + + outputs[outputs == self.llm_tokenizer. 
+ unk_token_id] = self.llm_tokenizer.eos_token_id + # data_dict['output_tokens'] = outputs # unable to gather variable-length tensors + + output_txt = self.llm_tokenizer.batch_decode(outputs, + skip_special_tokens=True) + output_txt = [txt.strip() for txt in output_txt] + data_dict['output_txt'] = output_txt + return data_dict + + @torch.no_grad() + def predict_answers(self, data_dict, answer_list, num_ans_candidates=128): + """(1) Generate the first token and select most probable candidates + (num_ans_candidates) (2) Then select answers from answer list, which + start with the probable tokens (3) Lastly, use the selected answers as + the ground-truth labels and calculate LM loss Return the answer that + minimize the loss as the predicted answer.""" + device = self.device + num_ans_candidates = min(num_ans_candidates, len(answer_list)) + + self.llm_tokenizer.padding_side = 'right' + answer_candidates = self.llm_tokenizer(answer_list, + padding='longest', + return_tensors='pt').to(device) + + answer_ids = answer_candidates.input_ids + answer_atts = answer_candidates.attention_mask + + # (1) + if 'obj_tokens' not in data_dict: + data_dict = self.pcd_encoder(data_dict) + + data_dict['obj_tokens'] = self.pcd_proj( + data_dict['obj_tokens'].to(device)) + # data_dict['obj_tokens'] = data_dict['obj_tokens'] + self.pcd_type_embed + + data_dict['img_tokens'] = self.img_proj( + self.img_encoder(data_dict['img_fts'])) + # data_dict['img_tokens'] = data_dict['img_tokens'] + self.img_type_embed + + inputs_embeds, attention_mask = self.build_right_justified_sequence( + data_dict=data_dict) + bs = inputs_embeds.shape[0] + + # give bos token as condition + bos_tokens_ids = answer_ids[0, 0].view(1, 1).repeat(bs, 1) # (B, 1) + bos_tokens_attn = answer_atts[0, 0].view(1, 1).repeat(bs, 1) # (B, 1) + + bos_embeds = self.llm_model.get_input_embeddings()( + bos_tokens_ids) # (B, 1, D) + inputs_embeds = torch.cat([inputs_embeds, bos_embeds], + dim=1) # (B, T1+O+T2+1, D) + attention_mask = torch.cat([attention_mask, bos_tokens_attn], + dim=1) # (B, T1+O+T2+1) + + with maybe_autocast(self): + start_output = self.llm_model(inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + return_dict=True) + logits = start_output.logits[:, -1, :] # first predicted token's logit + + answer_first_token = answer_ids[:, 1] + prob_first_token = F.softmax(logits, dim=1).index_select( + dim=1, index=answer_first_token) + topk_probs, topk_ids = prob_first_token.topk(num_ans_candidates, dim=1) + # (bs, num_ans_candidates) + + # (2) + ans_ids = [] + ans_atts = [] + for topk_id in topk_ids: + ans_ids.append(answer_ids.index_select(dim=0, index=topk_id)) + ans_atts.append(answer_atts.index_select(dim=0, index=topk_id)) + ans_ids = torch.cat(ans_ids, dim=0) + ans_atts = torch.cat(ans_atts, dim=0) + # (B * num_ans_candidates, T3) + + inputs_embeds = inputs_embeds.repeat_interleave(num_ans_candidates, + dim=0) + attention_mask = attention_mask.repeat_interleave(num_ans_candidates, + dim=0) + # (B * num_ans_candidates, T1+O+T2+1, D), (B * num_ans_candidates, T1+O+T2+1) + + # truncate the appended bos token before concat + inputs_embeds = inputs_embeds[:, :-1, :] + attention_mask = attention_mask[:, :-1] + # (B * num_ans_candidates, T1+O+T2, D), (B * num_ans_candidates, T1+O+T2) + + ans_embeds = self.llm_model.get_input_embeddings()( + ans_ids) # (B * num_ans_candidates, T3, D) + inputs_embeds = torch.cat( + [inputs_embeds, ans_embeds], + dim=1) # (B * num_ans_candidates, T1+O+T2+T3, D) + attention_mask = torch.cat( + [attention_mask, 
ans_atts], + dim=1) # (B * num_ans_candidates, T1+O+T2+T3) + + targets_ids = torch.zeros_like(attention_mask).long().fill_( + -100) # (B * num_ans_candidates, T1+O+T2+T3) + # only apply loss to answer tokens + targets_idx = ans_atts.bool() + targets_ids[:, + -targets_idx.shape[1]:][targets_idx] = ans_ids[targets_idx] + + # ignore the prediction of bos token + targets_ids[:, -targets_idx.shape[1]] = -100 + + # (3) + with maybe_autocast(self): + output = self.llm_model(inputs_embeds=inputs_embeds, + attention_mask=attention_mask, + labels=targets_ids, + return_dict=True) + + logits = output.logits.float() + + # get loss + shift_logits = logits[..., :-1, :].contiguous() + shift_labels = targets_ids[..., 1:].contiguous() + num_tokens_for_loss = (shift_labels >= 0).int().sum(1) + + shift_logits = rearrange(shift_logits, 'b t v -> (b t) v') + shift_labels = rearrange(shift_labels, 'b t -> (b t)') + + shift_labels = shift_labels.to(shift_logits.device) + loss = F.cross_entropy(shift_logits, shift_labels, + reduction='none') # get loss per token + + loss = rearrange(loss, '(b t) -> b t', b=bs * num_ans_candidates) + loss = loss.sum( + 1 + ) / num_tokens_for_loss # get loss per sequence, average over tokens + loss = rearrange(loss, '(b1 b2) -> b1 b2', b1=bs) + + max_topk_ids = (-loss).argmax(dim=1) + max_ids = topk_ids[max_topk_ids >= 0, max_topk_ids] + + data_dict['answer_id'] = max_ids + data_dict['output_txt'] = [answer_list[max_id] for max_id in max_ids] + + return data_dict diff --git a/models/LEO/model/pcd_backbone.py b/models/LEO/model/pcd_backbone.py new file mode 100755 index 0000000..7763699 --- /dev/null +++ b/models/LEO/model/pcd_backbone.py @@ -0,0 +1,56 @@ +import os + +import torch +from accelerate.logging import get_logger +from einops import rearrange +from hydra.utils import instantiate +# from model.pointnext.pointnext import PointNext +from model.pointbert.pointbert import PointBERT +from model.pointnetpp.pointnetpp import PointNetPP +from model.utils import disabled_train +from torch import nn + +logger = get_logger(__name__) + + +class PointcloudBackbone(nn.Module): + + def __init__(self, cfg): + super().__init__() + + self.pcd_net = instantiate(cfg.net) + self.backbone_name = cfg.net._target_.split('.')[-1] + self.out_dim = self.pcd_net.out_dim + # logger.info(f"Build PointcloudBackbone: {self.backbone_name}") + + path = cfg.path + if path is not None and os.path.exists(path): + self.pcd_net.load_state_dict(torch.load(path), strict=False) + logger.info(f'Load {self.backbone_name} weights from {path}') + + self.freeze = cfg.freeze + if self.freeze: + for p in self.parameters(): + p.requires_grad = False + self.eval() + self.train = disabled_train + + +# logger.info(f"Freeze {self.backbone_name}") + + def forward_normal(self, obj_pcds): + # obj_pcds: (batch_size, num_objs, num_points, 6) + batch_size = obj_pcds.shape[0] + obj_embeds = self.pcd_net(rearrange(obj_pcds, 'b o p d -> (b o) p d')) + obj_embeds = rearrange(obj_embeds, '(b o) d -> b o d', b=batch_size) + return obj_embeds + + @torch.no_grad() + def forward_frozen(self, obj_pcds): + return self.forward_normal(obj_pcds) + + def forward(self, obj_pcds): + if self.freeze: + return self.forward_frozen(obj_pcds) + else: + return self.forward_normal(obj_pcds) diff --git a/models/LEO/model/pointbert/dvae.py b/models/LEO/model/pointbert/dvae.py new file mode 100644 index 0000000..70863a1 --- /dev/null +++ b/models/LEO/model/pointbert/dvae.py @@ -0,0 +1,136 @@ +import os +import sys + +import torch +import torch.nn as nn + 
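+# this file provides the grouping utilities (FPS centers via misc.fps plus
+# kNN neighborhoods) and the per-group PointNet-style encoder that Point-BERT
+# uses to turn a raw point cloud into local patch embeddings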
+BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +import misc + + +### ref https://github.com/Strawberry-Eat-Mango/PCT_Pytorch/blob/main/util.py ### +def knn_point(nsample, xyz, new_xyz): + """ + Input: + nsample: max sample number in local region + xyz: all points, [B, N, C] + new_xyz: query points, [B, S, C] + Return: + group_idx: grouped points index, [B, S, nsample] + """ + sqrdists = square_distance(new_xyz, xyz) + _, group_idx = torch.topk(sqrdists, + nsample, + dim=-1, + largest=False, + sorted=False) + return group_idx + + +def square_distance(src, dst): + """Calculate Euclid distance between each two points. + + src^T * dst = xn * xm + yn * ym + zn * zm; + sum(src^2, dim=-1) = xn*xn + yn*yn + zn*zn; + sum(dst^2, dim=-1) = xm*xm + ym*ym + zm*zm; + dist = (xn - xm)^2 + (yn - ym)^2 + (zn - zm)^2 + = sum(src**2,dim=-1)+sum(dst**2,dim=-1)-2*src^T*dst + Input: + src: source points, [B, N, C] + dst: target points, [B, M, C] + Output: + dist: per-point square distance, [B, N, M] + """ + B, N, _ = src.shape + _, M, _ = dst.shape + dist = -2 * torch.matmul(src, dst.permute(0, 2, 1)) + dist += torch.sum(src**2, -1).view(B, N, 1) + dist += torch.sum(dst**2, -1).view(B, 1, M) + return dist + + +class Group(nn.Module): + + def __init__(self, num_group, group_size): + super().__init__() + self.num_group = num_group + self.group_size = group_size + # self.knn = KNN(k=self.group_size, transpose_mode=True) + + def forward(self, xyz): + ''' + input: B N 3 + --------------------------- + output: B G M 3 + center : B G 3 + ''' + B, N, C = xyz.shape + if C > 3: + data = xyz + xyz = data[:, :, :3] + rgb = data[:, :, 3:] + batch_size, num_points, _ = xyz.shape + # fps the centers out + center = misc.fps(xyz, self.num_group) # B G 3 + + # knn to get the neighborhood + # _, idx = self.knn(xyz, center) # B G M + idx = knn_point(self.group_size, xyz, center) # B G M + assert idx.size(1) == self.num_group + assert idx.size(2) == self.group_size + idx_base = torch.arange(0, batch_size, device=xyz.device).view( + -1, 1, 1) * num_points + idx = idx + idx_base + idx = idx.view(-1) + + neighborhood_xyz = xyz.view(batch_size * num_points, -1)[idx, :] + neighborhood_xyz = neighborhood_xyz.view(batch_size, self.num_group, + self.group_size, + 3).contiguous() + if C > 3: + neighborhood_rgb = rgb.view(batch_size * num_points, -1)[idx, :] + neighborhood_rgb = neighborhood_rgb.view(batch_size, + self.num_group, + self.group_size, + -1).contiguous() + + # normalize xyz + neighborhood_xyz = neighborhood_xyz - center.unsqueeze(2) + if C > 3: + neighborhood = torch.cat((neighborhood_xyz, neighborhood_rgb), + dim=-1) + else: + neighborhood = neighborhood_xyz + return neighborhood, center + + +class Encoder(nn.Module): + + def __init__(self, encoder_channel, point_input_dims=3): + super().__init__() + self.encoder_channel = encoder_channel + self.point_input_dims = point_input_dims + self.first_conv = nn.Sequential( + nn.Conv1d(self.point_input_dims, 128, 1), nn.BatchNorm1d(128), + nn.ReLU(inplace=True), nn.Conv1d(128, 256, 1)) + self.second_conv = nn.Sequential( + nn.Conv1d(512, 512, 1), nn.BatchNorm1d(512), nn.ReLU(inplace=True), + nn.Conv1d(512, self.encoder_channel, 1)) + + def forward(self, point_groups): + ''' + point_groups : B G N 3 + ----------------- + feature_global : B G C + ''' + bs, g, n, c = point_groups.shape + point_groups = point_groups.reshape(bs * g, n, c) + # encoder + feature = self.first_conv(point_groups.transpose(2, 1)) # BG 256 n + feature_global = torch.max(feature, 
dim=2, keepdim=True)[0] # BG 256 1 + feature = torch.cat([feature_global.expand(-1, -1, n), feature], + dim=1) # BG 512 n + feature = self.second_conv(feature) # BG 1024 n + feature_global = torch.max(feature, dim=2, keepdim=False)[0] # BG 1024 + return feature_global.reshape(bs, g, self.encoder_channel) diff --git a/models/LEO/model/pointbert/misc.py b/models/LEO/model/pointbert/misc.py new file mode 100644 index 0000000..beb7ba5 --- /dev/null +++ b/models/LEO/model/pointbert/misc.py @@ -0,0 +1,44 @@ +import torch + + +def index_points(points, idx): + """ + Input: + points: input points data, [B, N, C] + idx: sample index data, [B, S] + Return: + new_points:, indexed points data, [B, S, C] + """ + device = points.device + B = points.shape[0] + view_shape = list(idx.shape) + view_shape[1:] = [1] * (len(view_shape) - 1) + repeat_shape = list(idx.shape) + repeat_shape[0] = 1 + batch_indices = torch.arange( + B, dtype=torch.long).to(device).view(view_shape).repeat(repeat_shape) + new_points = points[batch_indices, idx, :] + return new_points + + +def fps(xyz, npoint): + """ + Input: + xyz: pointcloud data, [B, N, 3] + npoint: number of samples + Return: + centroids: sampled pointcloud index, [B, npoint] + """ + device = xyz.device + B, N, C = xyz.shape + centroids = torch.zeros(B, npoint, dtype=torch.long).to(device) + distance = torch.ones(B, N).to(device) * 1e10 + farthest = torch.randint(0, N, (B, ), dtype=torch.long).to(device) + batch_indices = torch.arange(B, dtype=torch.long).to(device) + for i in range(npoint): + centroids[:, i] = farthest + centroid = xyz[batch_indices, farthest, :].view(B, 1, 3) + dist = torch.sum((xyz - centroid)**2, -1) + distance = torch.min(distance, dist) + farthest = torch.max(distance, -1)[1] + return index_points(xyz, centroids) diff --git a/models/LEO/model/pointbert/pointbert.py b/models/LEO/model/pointbert/pointbert.py new file mode 100644 index 0000000..7600cfb --- /dev/null +++ b/models/LEO/model/pointbert/pointbert.py @@ -0,0 +1,219 @@ +import torch +import torch.nn as nn +from accelerate.logging import get_logger +from model.pointbert.dvae import Encoder, Group +from timm.models.layers import DropPath + +logger = get_logger(__name__) + + +class Mlp(nn.Module): + + def __init__(self, + in_features, + hidden_features=None, + out_features=None, + act_layer=nn.GELU, + drop=0.): + super().__init__() + out_features = out_features or in_features + hidden_features = hidden_features or in_features + self.fc1 = nn.Linear(in_features, hidden_features) + self.act = act_layer() + self.fc2 = nn.Linear(hidden_features, out_features) + self.drop = nn.Dropout(drop) + + def forward(self, x): + x = self.fc1(x) + x = self.act(x) + x = self.drop(x) + x = self.fc2(x) + x = self.drop(x) + return x + + +class Attention(nn.Module): + + def __init__(self, + dim, + num_heads=8, + qkv_bias=False, + qk_scale=None, + attn_drop=0., + proj_drop=0.): + super().__init__() + self.num_heads = num_heads + head_dim = dim // num_heads + # NOTE scale factor was wrong in my original version, can set manually to be compat with prev weights + self.scale = qk_scale or head_dim**-0.5 + + self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) + self.attn_drop = nn.Dropout(attn_drop) + self.proj = nn.Linear(dim, dim) + self.proj_drop = nn.Dropout(proj_drop) + + def forward(self, x): + B, N, C = x.shape + qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, + C // self.num_heads).permute(2, 0, 3, 1, 4) + q, k, v = qkv[0], qkv[1], qkv[ + 2] # make torchscript happy (cannot use tensor as tuple) + + attn = 
(q @ k.transpose(-2, -1)) * self.scale + attn = attn.softmax(dim=-1) + attn = self.attn_drop(attn) + + x = (attn @ v).transpose(1, 2).reshape(B, N, C) + x = self.proj(x) + x = self.proj_drop(x) + return x + + +class Block(nn.Module): + + def __init__(self, + dim, + num_heads, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop=0., + attn_drop=0., + drop_path=0., + act_layer=nn.GELU, + norm_layer=nn.LayerNorm): + super().__init__() + self.norm1 = norm_layer(dim) + + # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here + self.drop_path = DropPath( + drop_path) if drop_path > 0. else nn.Identity() + self.norm2 = norm_layer(dim) + mlp_hidden_dim = int(dim * mlp_ratio) + self.mlp = Mlp(in_features=dim, + hidden_features=mlp_hidden_dim, + act_layer=act_layer, + drop=drop) + + self.attn = Attention(dim, + num_heads=num_heads, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + attn_drop=attn_drop, + proj_drop=drop) + + def forward(self, x): + x = x + self.drop_path(self.attn(self.norm1(x))) + x = x + self.drop_path(self.mlp(self.norm2(x))) + return x + + +class TransformerEncoder(nn.Module): + """Transformer Encoder without hierarchical structure.""" + + def __init__(self, + embed_dim=768, + depth=4, + num_heads=12, + mlp_ratio=4., + qkv_bias=False, + qk_scale=None, + drop_rate=0., + attn_drop_rate=0., + drop_path_rate=0.): + super().__init__() + + self.blocks = nn.ModuleList([ + Block(dim=embed_dim, + num_heads=num_heads, + mlp_ratio=mlp_ratio, + qkv_bias=qkv_bias, + qk_scale=qk_scale, + drop=drop_rate, + attn_drop=attn_drop_rate, + drop_path=drop_path_rate[i] if isinstance( + drop_path_rate, list) else drop_path_rate) + for i in range(depth) + ]) + + def forward(self, x, pos): + for _, block in enumerate(self.blocks): + x = block(x + pos) + return x + + +class PointBERT(nn.Module): + + def __init__(self, + trans_dim, + depth, + drop_path_rate, + cls_dim, + num_heads, + group_size, + num_group, + encoder_dims, + add_RGB=False) -> None: + super().__init__() + self.trans_dim = trans_dim + self.depth = depth + self.drop_path_rate = drop_path_rate + self.cls_dim = cls_dim + self.num_heads = num_heads + + self.group_size = group_size + self.num_group = num_group + # grouper + self.group_divider = Group(num_group=self.num_group, + group_size=self.group_size) + # define the encoder + self.encoder_dims = encoder_dims + self.add_RGB = add_RGB + self.point_input_dims = 3 if self.add_RGB is False else 6 + logger.debug( + f'PointBERT: add_RGB = {add_RGB} point_input_dims = {self.point_input_dims}' + ) + self.encoder = Encoder(encoder_channel=self.encoder_dims, + point_input_dims=self.point_input_dims) + # bridge encoder and transformer + self.reduce_dim = nn.Linear(self.encoder_dims, self.trans_dim) + + self.cls_token = nn.Parameter(torch.zeros(1, 1, self.trans_dim)) + self.cls_pos = nn.Parameter(torch.randn(1, 1, self.trans_dim)) + + self.pos_embed = nn.Sequential(nn.Linear(3, 128), nn.GELU(), + nn.Linear(128, self.trans_dim)) + + dpr = [ + x.item() + for x in torch.linspace(0, self.drop_path_rate, self.depth) + ] + self.blocks = TransformerEncoder(embed_dim=self.trans_dim, + depth=self.depth, + drop_path_rate=dpr, + num_heads=self.num_heads) + + self.norm = nn.LayerNorm(self.trans_dim) + self.out_dim = trans_dim * 2 + self.proj_layer = nn.Linear(self.out_dim, self.out_dim, bias=False) + + def forward(self, pts): + # divide the point cloud in the same form. 
This is important
+        neighborhood, center = self.group_divider(pts)
+        # encode the grouped point cloud blocks
+        group_input_tokens = self.encoder(neighborhood)  # B G C
+        group_input_tokens = self.reduce_dim(group_input_tokens)
+        # prepare cls
+        cls_tokens = self.cls_token.expand(group_input_tokens.size(0), -1, -1)
+        cls_pos = self.cls_pos.expand(group_input_tokens.size(0), -1, -1)
+        # add pos embedding
+        pos = self.pos_embed(center)
+        # final input
+        x = torch.cat((cls_tokens, group_input_tokens), dim=1)
+        pos = torch.cat((cls_pos, pos), dim=1)
+        # transformer
+        x = self.blocks(x, pos)
+        x = self.norm(x)
+        concat_f = torch.cat([x[:, 0], x[:, 1:].max(1)[0]], dim=-1)
+        # ret = self.cls_head_finetune(concat_f)
+        return concat_f
diff --git a/models/LEO/model/pointnetpp/_ext_src/include/ball_query.h b/models/LEO/model/pointnetpp/_ext_src/include/ball_query.h
new file mode 100755
index 0000000..1bbc638
--- /dev/null
+++ b/models/LEO/model/pointnetpp/_ext_src/include/ball_query.h
@@ -0,0 +1,5 @@
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
+                      const int nsample);
diff --git a/models/LEO/model/pointnetpp/_ext_src/include/cuda_utils.h b/models/LEO/model/pointnetpp/_ext_src/include/cuda_utils.h
new file mode 100755
index 0000000..0fd5b6e
--- /dev/null
+++ b/models/LEO/model/pointnetpp/_ext_src/include/cuda_utils.h
@@ -0,0 +1,41 @@
+#ifndef _CUDA_UTILS_H
+#define _CUDA_UTILS_H
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <cmath>
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+#include <vector>
+
+#define TOTAL_THREADS 512
+
+inline int opt_n_threads(int work_size) {
+  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
+
+  return max(min(1 << pow_2, TOTAL_THREADS), 1);
+}
+
+inline dim3 opt_block_config(int x, int y) {
+  const int x_threads = opt_n_threads(x);
+  const int y_threads =
+      max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
+  dim3 block_config(x_threads, y_threads, 1);
+
+  return block_config;
+}
+
+#define CUDA_CHECK_ERRORS()                                           \
+  do {                                                                \
+    cudaError_t err = cudaGetLastError();                             \
+    if (cudaSuccess != err) {                                         \
+      fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n",  \
+              cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
+              __FILE__);                                              \
+      exit(-1);                                                       \
+    }                                                                 \
+  } while (0)
+
+#endif
diff --git a/models/LEO/model/pointnetpp/_ext_src/include/group_points.h b/models/LEO/model/pointnetpp/_ext_src/include/group_points.h
new file mode 100755
index 0000000..ad20cda
--- /dev/null
+++ b/models/LEO/model/pointnetpp/_ext_src/include/group_points.h
@@ -0,0 +1,5 @@
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor group_points(at::Tensor points, at::Tensor idx);
+at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
diff --git a/models/LEO/model/pointnetpp/_ext_src/include/interpolate.h b/models/LEO/model/pointnetpp/_ext_src/include/interpolate.h
new file mode 100755
index 0000000..26b3464
--- /dev/null
+++ b/models/LEO/model/pointnetpp/_ext_src/include/interpolate.h
@@ -0,0 +1,10 @@
+#pragma once
+
+#include <torch/extension.h>
+#include <vector>
+
+std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
+at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
+                             at::Tensor weight);
+at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
+                                  at::Tensor weight, const int m);
diff --git a/models/LEO/model/pointnetpp/_ext_src/include/sampling.h b/models/LEO/model/pointnetpp/_ext_src/include/sampling.h
new file mode 100755
index 0000000..d795271
--- /dev/null
+++ b/models/LEO/model/pointnetpp/_ext_src/include/sampling.h
@@ -0,0 +1,6 @@
+#pragma once
+#include <torch/extension.h>
+
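+// A minimal usage sketch of these ops from the Python side, via the compiled
+// `_ext` module (see src/bindings.cpp; shapes follow the kernel comments in
+// src/sampling_gpu.cu):
+//   idx = _ext.furthest_point_sampling(xyz, npoint)  # xyz (b, n, 3) -> (b, npoint) int32
+//   new_xyz = _ext.gather_points(xyz_t, idx)         # xyz_t (b, 3, n) -> (b, 3, npoint)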
+at::Tensor gather_points(at::Tensor points, at::Tensor idx);
+at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
+at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);
diff --git a/models/LEO/model/pointnetpp/_ext_src/include/utils.h b/models/LEO/model/pointnetpp/_ext_src/include/utils.h
new file mode 100755
index 0000000..5f080ed
--- /dev/null
+++ b/models/LEO/model/pointnetpp/_ext_src/include/utils.h
@@ -0,0 +1,25 @@
+#pragma once
+#include <ATen/cuda/CUDAContext.h>
+#include <torch/serialize/tensor.h>
+
+#define CHECK_CUDA(x)                                          \
+  do {                                                         \
+    AT_ASSERT(x.is_cuda(), #x " must be a CUDA tensor");       \
+  } while (0)
+
+#define CHECK_CONTIGUOUS(x)                                          \
+  do {                                                               \
+    AT_ASSERT(x.is_contiguous(), #x " must be a contiguous tensor"); \
+  } while (0)
+
+#define CHECK_IS_INT(x)                                 \
+  do {                                                  \
+    AT_ASSERT(x.scalar_type() == at::ScalarType::Int,   \
+              #x " must be an int tensor");             \
+  } while (0)
+
+#define CHECK_IS_FLOAT(x)                               \
+  do {                                                  \
+    AT_ASSERT(x.scalar_type() == at::ScalarType::Float, \
+              #x " must be a float tensor");            \
+  } while (0)
diff --git a/models/LEO/model/pointnetpp/_ext_src/src/ball_query.cpp b/models/LEO/model/pointnetpp/_ext_src/src/ball_query.cpp
new file mode 100755
index 0000000..b1797c1
--- /dev/null
+++ b/models/LEO/model/pointnetpp/_ext_src/src/ball_query.cpp
@@ -0,0 +1,32 @@
+#include "ball_query.h"
+#include "utils.h"
+
+void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
+                                     int nsample, const float *new_xyz,
+                                     const float *xyz, int *idx);
+
+at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
+                      const int nsample) {
+  CHECK_CONTIGUOUS(new_xyz);
+  CHECK_CONTIGUOUS(xyz);
+  CHECK_IS_FLOAT(new_xyz);
+  CHECK_IS_FLOAT(xyz);
+
+  if (new_xyz.is_cuda()) {
+    CHECK_CUDA(xyz);
+  }
+
+  at::Tensor idx =
+      torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
+                   at::device(new_xyz.device()).dtype(at::ScalarType::Int));
+
+  if (new_xyz.is_cuda()) {
+    query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
+                                    radius, nsample,
+                                    new_xyz.data_ptr<float>(),
+                                    xyz.data_ptr<float>(),
+                                    idx.data_ptr<int>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return idx;
+}
diff --git a/models/LEO/model/pointnetpp/_ext_src/src/ball_query_gpu.cu b/models/LEO/model/pointnetpp/_ext_src/src/ball_query_gpu.cu
new file mode 100755
index 0000000..559aef9
--- /dev/null
+++ b/models/LEO/model/pointnetpp/_ext_src/src/ball_query_gpu.cu
@@ -0,0 +1,54 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: new_xyz(b, m, 3) xyz(b, n, 3)
+// output: idx(b, m, nsample)
+__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
+                                        int nsample,
+                                        const float *__restrict__ new_xyz,
+                                        const float *__restrict__ xyz,
+                                        int *__restrict__ idx) {
+  int batch_index = blockIdx.x;
+  xyz += batch_index * n * 3;
+  new_xyz += batch_index * m * 3;
+  idx += m * nsample * batch_index;
+
+  int index = threadIdx.x;
+  int stride = blockDim.x;
+
+  float radius2 = radius * radius;
+  for (int j = index; j < m; j += stride) {
+    float new_x = new_xyz[j * 3 + 0];
+    float new_y = new_xyz[j * 3 + 1];
+    float new_z = new_xyz[j * 3 + 2];
+    for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
+      float x = xyz[k * 3 + 0];
+      float y = xyz[k * 3 + 1];
+      float z = xyz[k * 3 + 2];
+      float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
+                 (new_z - z) * (new_z - z);
+      if (d2 < radius2) {
+        if (cnt == 0) {
+          for (int l = 0; l < nsample; ++l) {
+            idx[j * nsample + l] = k;
+          }
+        }
+        idx[j * nsample + cnt] = k;
+        ++cnt;
+      }
+    }
+  }
+}
+
+void query_ball_point_kernel_wrapper(int
b, int n, int m, float radius, + int nsample, const float *new_xyz, + const float *xyz, int *idx) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + query_ball_point_kernel<<>>( + b, n, m, radius, nsample, new_xyz, xyz, idx); + + CUDA_CHECK_ERRORS(); +} diff --git a/models/LEO/model/pointnetpp/_ext_src/src/bindings.cpp b/models/LEO/model/pointnetpp/_ext_src/src/bindings.cpp new file mode 100755 index 0000000..d1916ce --- /dev/null +++ b/models/LEO/model/pointnetpp/_ext_src/src/bindings.cpp @@ -0,0 +1,19 @@ +#include "ball_query.h" +#include "group_points.h" +#include "interpolate.h" +#include "sampling.h" + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("gather_points", &gather_points); + m.def("gather_points_grad", &gather_points_grad); + m.def("furthest_point_sampling", &furthest_point_sampling); + + m.def("three_nn", &three_nn); + m.def("three_interpolate", &three_interpolate); + m.def("three_interpolate_grad", &three_interpolate_grad); + + m.def("ball_query", &ball_query); + + m.def("group_points", &group_points); + m.def("group_points_grad", &group_points_grad); +} diff --git a/models/LEO/model/pointnetpp/_ext_src/src/group_points.cpp b/models/LEO/model/pointnetpp/_ext_src/src/group_points.cpp new file mode 100755 index 0000000..285a4bd --- /dev/null +++ b/models/LEO/model/pointnetpp/_ext_src/src/group_points.cpp @@ -0,0 +1,62 @@ +#include "group_points.h" +#include "utils.h" + +void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, + const float *points, const int *idx, + float *out); + +void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, + int nsample, const float *grad_out, + const int *idx, float *grad_points); + +at::Tensor group_points(at::Tensor points, at::Tensor idx) { + CHECK_CONTIGUOUS(points); + CHECK_CONTIGUOUS(idx); + CHECK_IS_FLOAT(points); + CHECK_IS_INT(idx); + + if (points.is_cuda()) { + CHECK_CUDA(idx); + } + + at::Tensor output = + torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)}, + at::device(points.device()).dtype(at::ScalarType::Float)); + + if (points.is_cuda()) { + group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), + idx.size(1), idx.size(2), + points.data_ptr(), idx.data_ptr(), + output.data_ptr()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} + +at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) { + CHECK_CONTIGUOUS(grad_out); + CHECK_CONTIGUOUS(idx); + CHECK_IS_FLOAT(grad_out); + CHECK_IS_INT(idx); + + if (grad_out.is_cuda()) { + CHECK_CUDA(idx); + } + + at::Tensor output = + torch::zeros({grad_out.size(0), grad_out.size(1), n}, + at::device(grad_out.device()).dtype(at::ScalarType::Float)); + + if (grad_out.is_cuda()) { + group_points_grad_kernel_wrapper( + grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2), + grad_out.data_ptr(), idx.data_ptr(), + output.data_ptr()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} diff --git a/models/LEO/model/pointnetpp/_ext_src/src/group_points_gpu.cu b/models/LEO/model/pointnetpp/_ext_src/src/group_points_gpu.cu new file mode 100755 index 0000000..57c2b1b --- /dev/null +++ b/models/LEO/model/pointnetpp/_ext_src/src/group_points_gpu.cu @@ -0,0 +1,75 @@ +#include +#include + +#include "cuda_utils.h" + +// input: points(b, c, n) idx(b, npoints, nsample) +// output: out(b, c, npoints, nsample) +__global__ void group_points_kernel(int b, int c, int n, int npoints, + int nsample, + const float *__restrict__ points, + 
const int *__restrict__ idx, + float *__restrict__ out) { + int batch_index = blockIdx.x; + points += batch_index * n * c; + idx += batch_index * npoints * nsample; + out += batch_index * npoints * nsample * c; + + const int index = threadIdx.y * blockDim.x + threadIdx.x; + const int stride = blockDim.y * blockDim.x; + for (int i = index; i < c * npoints; i += stride) { + const int l = i / npoints; + const int j = i % npoints; + for (int k = 0; k < nsample; ++k) { + int ii = idx[j * nsample + k]; + out[(l * npoints + j) * nsample + k] = points[l * n + ii]; + } + } +} + +void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample, + const float *points, const int *idx, + float *out) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + group_points_kernel<<>>( + b, c, n, npoints, nsample, points, idx, out); + + CUDA_CHECK_ERRORS(); +} + +// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample) +// output: grad_points(b, c, n) +__global__ void group_points_grad_kernel(int b, int c, int n, int npoints, + int nsample, + const float *__restrict__ grad_out, + const int *__restrict__ idx, + float *__restrict__ grad_points) { + int batch_index = blockIdx.x; + grad_out += batch_index * npoints * nsample * c; + idx += batch_index * npoints * nsample; + grad_points += batch_index * n * c; + + const int index = threadIdx.y * blockDim.x + threadIdx.x; + const int stride = blockDim.y * blockDim.x; + for (int i = index; i < c * npoints; i += stride) { + const int l = i / npoints; + const int j = i % npoints; + for (int k = 0; k < nsample; ++k) { + int ii = idx[j * nsample + k]; + atomicAdd(grad_points + l * n + ii, + grad_out[(l * npoints + j) * nsample + k]); + } + } +} + +void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints, + int nsample, const float *grad_out, + const int *idx, float *grad_points) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + group_points_grad_kernel<<>>( + b, c, n, npoints, nsample, grad_out, idx, grad_points); + + CUDA_CHECK_ERRORS(); +} diff --git a/models/LEO/model/pointnetpp/_ext_src/src/interpolate.cpp b/models/LEO/model/pointnetpp/_ext_src/src/interpolate.cpp new file mode 100755 index 0000000..cdee31c --- /dev/null +++ b/models/LEO/model/pointnetpp/_ext_src/src/interpolate.cpp @@ -0,0 +1,99 @@ +#include "interpolate.h" +#include "utils.h" + +void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown, + const float *known, float *dist2, int *idx); +void three_interpolate_kernel_wrapper(int b, int c, int m, int n, + const float *points, const int *idx, + const float *weight, float *out); +void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, + const float *grad_out, + const int *idx, const float *weight, + float *grad_points); + +std::vector three_nn(at::Tensor unknowns, at::Tensor knows) { + CHECK_CONTIGUOUS(unknowns); + CHECK_CONTIGUOUS(knows); + CHECK_IS_FLOAT(unknowns); + CHECK_IS_FLOAT(knows); + + if (unknowns.is_cuda()) { + CHECK_CUDA(knows); + } + + at::Tensor idx = + torch::zeros({unknowns.size(0), unknowns.size(1), 3}, + at::device(unknowns.device()).dtype(at::ScalarType::Int)); + at::Tensor dist2 = + torch::zeros({unknowns.size(0), unknowns.size(1), 3}, + at::device(unknowns.device()).dtype(at::ScalarType::Float)); + + if (unknowns.is_cuda()) { + three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1), + unknowns.data_ptr(), knows.data_ptr(), + dist2.data_ptr(), idx.data_ptr()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return 
{dist2, idx}; +} + +at::Tensor three_interpolate(at::Tensor points, at::Tensor idx, + at::Tensor weight) { + CHECK_CONTIGUOUS(points); + CHECK_CONTIGUOUS(idx); + CHECK_CONTIGUOUS(weight); + CHECK_IS_FLOAT(points); + CHECK_IS_INT(idx); + CHECK_IS_FLOAT(weight); + + if (points.is_cuda()) { + CHECK_CUDA(idx); + CHECK_CUDA(weight); + } + + at::Tensor output = + torch::zeros({points.size(0), points.size(1), idx.size(1)}, + at::device(points.device()).dtype(at::ScalarType::Float)); + + if (points.is_cuda()) { + three_interpolate_kernel_wrapper( + points.size(0), points.size(1), points.size(2), idx.size(1), + points.data_ptr(), idx.data_ptr(), weight.data_ptr(), + output.data_ptr()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} +at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx, + at::Tensor weight, const int m) { + CHECK_CONTIGUOUS(grad_out); + CHECK_CONTIGUOUS(idx); + CHECK_CONTIGUOUS(weight); + CHECK_IS_FLOAT(grad_out); + CHECK_IS_INT(idx); + CHECK_IS_FLOAT(weight); + + if (grad_out.is_cuda()) { + CHECK_CUDA(idx); + CHECK_CUDA(weight); + } + + at::Tensor output = + torch::zeros({grad_out.size(0), grad_out.size(1), m}, + at::device(grad_out.device()).dtype(at::ScalarType::Float)); + + if (grad_out.is_cuda()) { + three_interpolate_grad_kernel_wrapper( + grad_out.size(0), grad_out.size(1), grad_out.size(2), m, + grad_out.data_ptr(), idx.data_ptr(), + weight.data_ptr(), output.data_ptr()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} diff --git a/models/LEO/model/pointnetpp/_ext_src/src/interpolate_gpu.cu b/models/LEO/model/pointnetpp/_ext_src/src/interpolate_gpu.cu new file mode 100755 index 0000000..81c5548 --- /dev/null +++ b/models/LEO/model/pointnetpp/_ext_src/src/interpolate_gpu.cu @@ -0,0 +1,154 @@ +#include +#include +#include + +#include "cuda_utils.h" + +// input: unknown(b, n, 3) known(b, m, 3) +// output: dist2(b, n, 3), idx(b, n, 3) +__global__ void three_nn_kernel(int b, int n, int m, + const float *__restrict__ unknown, + const float *__restrict__ known, + float *__restrict__ dist2, + int *__restrict__ idx) { + int batch_index = blockIdx.x; + unknown += batch_index * n * 3; + known += batch_index * m * 3; + dist2 += batch_index * n * 3; + idx += batch_index * n * 3; + + int index = threadIdx.x; + int stride = blockDim.x; + for (int j = index; j < n; j += stride) { + float ux = unknown[j * 3 + 0]; + float uy = unknown[j * 3 + 1]; + float uz = unknown[j * 3 + 2]; + + double best1 = 1e40, best2 = 1e40, best3 = 1e40; + int besti1 = 0, besti2 = 0, besti3 = 0; + for (int k = 0; k < m; ++k) { + float x = known[k * 3 + 0]; + float y = known[k * 3 + 1]; + float z = known[k * 3 + 2]; + float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); + if (d < best1) { + best3 = best2; + besti3 = besti2; + best2 = best1; + besti2 = besti1; + best1 = d; + besti1 = k; + } else if (d < best2) { + best3 = best2; + besti3 = besti2; + best2 = d; + besti2 = k; + } else if (d < best3) { + best3 = d; + besti3 = k; + } + } + dist2[j * 3 + 0] = best1; + dist2[j * 3 + 1] = best2; + dist2[j * 3 + 2] = best3; + + idx[j * 3 + 0] = besti1; + idx[j * 3 + 1] = besti2; + idx[j * 3 + 2] = besti3; + } +} + +void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown, + const float *known, float *dist2, int *idx) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + three_nn_kernel<<>>(b, n, m, unknown, known, + dist2, idx); + + CUDA_CHECK_ERRORS(); +} + +// input: points(b, c, m), idx(b, n, 3), 
weight(b, n, 3) +// output: out(b, c, n) +__global__ void three_interpolate_kernel(int b, int c, int m, int n, + const float *__restrict__ points, + const int *__restrict__ idx, + const float *__restrict__ weight, + float *__restrict__ out) { + int batch_index = blockIdx.x; + points += batch_index * m * c; + + idx += batch_index * n * 3; + weight += batch_index * n * 3; + + out += batch_index * n * c; + + const int index = threadIdx.y * blockDim.x + threadIdx.x; + const int stride = blockDim.y * blockDim.x; + for (int i = index; i < c * n; i += stride) { + const int l = i / n; + const int j = i % n; + float w1 = weight[j * 3 + 0]; + float w2 = weight[j * 3 + 1]; + float w3 = weight[j * 3 + 2]; + + int i1 = idx[j * 3 + 0]; + int i2 = idx[j * 3 + 1]; + int i3 = idx[j * 3 + 2]; + + out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 + + points[l * m + i3] * w3; + } +} + +void three_interpolate_kernel_wrapper(int b, int c, int m, int n, + const float *points, const int *idx, + const float *weight, float *out) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + three_interpolate_kernel<<>>( + b, c, m, n, points, idx, weight, out); + + CUDA_CHECK_ERRORS(); +} + +// input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3) +// output: grad_points(b, c, m) + +__global__ void three_interpolate_grad_kernel( + int b, int c, int n, int m, const float *__restrict__ grad_out, + const int *__restrict__ idx, const float *__restrict__ weight, + float *__restrict__ grad_points) { + int batch_index = blockIdx.x; + grad_out += batch_index * n * c; + idx += batch_index * n * 3; + weight += batch_index * n * 3; + grad_points += batch_index * m * c; + + const int index = threadIdx.y * blockDim.x + threadIdx.x; + const int stride = blockDim.y * blockDim.x; + for (int i = index; i < c * n; i += stride) { + const int l = i / n; + const int j = i % n; + float w1 = weight[j * 3 + 0]; + float w2 = weight[j * 3 + 1]; + float w3 = weight[j * 3 + 2]; + + int i1 = idx[j * 3 + 0]; + int i2 = idx[j * 3 + 1]; + int i3 = idx[j * 3 + 2]; + + atomicAdd(grad_points + l * m + i1, grad_out[i] * w1); + atomicAdd(grad_points + l * m + i2, grad_out[i] * w2); + atomicAdd(grad_points + l * m + i3, grad_out[i] * w3); + } +} + +void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m, + const float *grad_out, + const int *idx, const float *weight, + float *grad_points) { + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + three_interpolate_grad_kernel<<>>( + b, c, n, m, grad_out, idx, weight, grad_points); + + CUDA_CHECK_ERRORS(); +} diff --git a/models/LEO/model/pointnetpp/_ext_src/src/sampling.cpp b/models/LEO/model/pointnetpp/_ext_src/src/sampling.cpp new file mode 100755 index 0000000..ddbdc11 --- /dev/null +++ b/models/LEO/model/pointnetpp/_ext_src/src/sampling.cpp @@ -0,0 +1,87 @@ +#include "sampling.h" +#include "utils.h" + +void gather_points_kernel_wrapper(int b, int c, int n, int npoints, + const float *points, const int *idx, + float *out); +void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, + const float *grad_out, const int *idx, + float *grad_points); + +void furthest_point_sampling_kernel_wrapper(int b, int n, int m, + const float *dataset, float *temp, + int *idxs); + +at::Tensor gather_points(at::Tensor points, at::Tensor idx) { + CHECK_CONTIGUOUS(points); + CHECK_CONTIGUOUS(idx); + CHECK_IS_FLOAT(points); + CHECK_IS_INT(idx); + + if (points.is_cuda()) { + CHECK_CUDA(idx); + } + + at::Tensor output = + torch::zeros({points.size(0), points.size(1), idx.size(1)}, + 
at::device(points.device()).dtype(at::ScalarType::Float)); + + if (points.is_cuda()) { + gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2), + idx.size(1), points.data_ptr(), + idx.data_ptr(), output.data_ptr()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} + +at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, + const int n) { + CHECK_CONTIGUOUS(grad_out); + CHECK_CONTIGUOUS(idx); + CHECK_IS_FLOAT(grad_out); + CHECK_IS_INT(idx); + + if (grad_out.is_cuda()) { + CHECK_CUDA(idx); + } + + at::Tensor output = + torch::zeros({grad_out.size(0), grad_out.size(1), n}, + at::device(grad_out.device()).dtype(at::ScalarType::Float)); + + if (grad_out.is_cuda()) { + gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n, + idx.size(1), grad_out.data_ptr(), + idx.data_ptr(), + output.data_ptr()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} +at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) { + CHECK_CONTIGUOUS(points); + CHECK_IS_FLOAT(points); + + at::Tensor output = + torch::zeros({points.size(0), nsamples}, + at::device(points.device()).dtype(at::ScalarType::Int)); + + at::Tensor tmp = + torch::full({points.size(0), points.size(1)}, 1e10, + at::device(points.device()).dtype(at::ScalarType::Float)); + + if (points.is_cuda()) { + furthest_point_sampling_kernel_wrapper( + points.size(0), points.size(1), nsamples, points.data_ptr(), + tmp.data_ptr(), output.data_ptr()); + } else { + AT_ASSERT(false, "CPU not supported"); + } + + return output; +} diff --git a/models/LEO/model/pointnetpp/_ext_src/src/sampling_gpu.cu b/models/LEO/model/pointnetpp/_ext_src/src/sampling_gpu.cu new file mode 100755 index 0000000..fc573f0 --- /dev/null +++ b/models/LEO/model/pointnetpp/_ext_src/src/sampling_gpu.cu @@ -0,0 +1,229 @@ +#include +#include + +#include "cuda_utils.h" + +// input: points(b, c, n) idx(b, m) +// output: out(b, c, m) +__global__ void gather_points_kernel(int b, int c, int n, int m, + const float *__restrict__ points, + const int *__restrict__ idx, + float *__restrict__ out) { + for (int i = blockIdx.x; i < b; i += gridDim.x) { + for (int l = blockIdx.y; l < c; l += gridDim.y) { + for (int j = threadIdx.x; j < m; j += blockDim.x) { + int a = idx[i * m + j]; + out[(i * c + l) * m + j] = points[(i * c + l) * n + a]; + } + } + } +} + +void gather_points_kernel_wrapper(int b, int c, int n, int npoints, + const float *points, const int *idx, + float *out) { + gather_points_kernel<<>>(b, c, n, npoints, + points, idx, out); + + CUDA_CHECK_ERRORS(); +} + +// input: grad_out(b, c, m) idx(b, m) +// output: grad_points(b, c, n) +__global__ void gather_points_grad_kernel(int b, int c, int n, int m, + const float *__restrict__ grad_out, + const int *__restrict__ idx, + float *__restrict__ grad_points) { + for (int i = blockIdx.x; i < b; i += gridDim.x) { + for (int l = blockIdx.y; l < c; l += gridDim.y) { + for (int j = threadIdx.x; j < m; j += blockDim.x) { + int a = idx[i * m + j]; + atomicAdd(grad_points + (i * c + l) * n + a, + grad_out[(i * c + l) * m + j]); + } + } + } +} + +void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints, + const float *grad_out, const int *idx, + float *grad_points) { + gather_points_grad_kernel<<>>( + b, c, n, npoints, grad_out, idx, grad_points); + + CUDA_CHECK_ERRORS(); +} + +__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, + int idx1, int idx2) { + const float v1 = dists[idx1], v2 = 
dists[idx2]; + const int i1 = dists_i[idx1], i2 = dists_i[idx2]; + dists[idx1] = max(v1, v2); + dists_i[idx1] = v2 > v1 ? i2 : i1; +} + +// Input dataset: (b, n, 3), tmp: (b, n) +// Ouput idxs (b, m) +template +__global__ void furthest_point_sampling_kernel( + int b, int n, int m, const float *__restrict__ dataset, + float *__restrict__ temp, int *__restrict__ idxs) { + if (m <= 0) return; + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int batch_index = blockIdx.x; + dataset += batch_index * n * 3; + temp += batch_index * n; + idxs += batch_index * m; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) idxs[0] = old; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + float x1 = dataset[old * 3 + 0]; + float y1 = dataset[old * 3 + 1]; + float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + float x2, y2, z2; + x2 = dataset[k * 3 + 0]; + y2 = dataset[k * 3 + 1]; + z2 = dataset[k * 3 + 2]; + float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); + if (mag <= 1e-3) continue; + + float d = + (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + + float d2 = min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + + if (block_size >= 512) { + if (tid < 256) { + __update(dists, dists_i, tid, tid + 256); + } + __syncthreads(); + } + if (block_size >= 256) { + if (tid < 128) { + __update(dists, dists_i, tid, tid + 128); + } + __syncthreads(); + } + if (block_size >= 128) { + if (tid < 64) { + __update(dists, dists_i, tid, tid + 64); + } + __syncthreads(); + } + if (block_size >= 64) { + if (tid < 32) { + __update(dists, dists_i, tid, tid + 32); + } + __syncthreads(); + } + if (block_size >= 32) { + if (tid < 16) { + __update(dists, dists_i, tid, tid + 16); + } + __syncthreads(); + } + if (block_size >= 16) { + if (tid < 8) { + __update(dists, dists_i, tid, tid + 8); + } + __syncthreads(); + } + if (block_size >= 8) { + if (tid < 4) { + __update(dists, dists_i, tid, tid + 4); + } + __syncthreads(); + } + if (block_size >= 4) { + if (tid < 2) { + __update(dists, dists_i, tid, tid + 2); + } + __syncthreads(); + } + if (block_size >= 2) { + if (tid < 1) { + __update(dists, dists_i, tid, tid + 1); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) idxs[j] = old; + } +} + +void furthest_point_sampling_kernel_wrapper(int b, int n, int m, + const float *dataset, float *temp, + int *idxs) { + unsigned int n_threads = opt_n_threads(n); + + cudaStream_t stream = at::cuda::getCurrentCUDAStream(); + + switch (n_threads) { + case 512: + furthest_point_sampling_kernel<512> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 256: + furthest_point_sampling_kernel<256> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 128: + furthest_point_sampling_kernel<128> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 64: + furthest_point_sampling_kernel<64> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 32: + furthest_point_sampling_kernel<32> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 16: + furthest_point_sampling_kernel<16> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 8: + furthest_point_sampling_kernel<8> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 4: + furthest_point_sampling_kernel<4> + <<>>(b, n, m, dataset, temp, idxs); + break; + case 2: + furthest_point_sampling_kernel<2> + <<>>(b, n, m, 
dataset, temp, idxs); + break; + case 1: + furthest_point_sampling_kernel<1> + <<>>(b, n, m, dataset, temp, idxs); + break; + default: + furthest_point_sampling_kernel<512> + <<>>(b, n, m, dataset, temp, idxs); + } + + CUDA_CHECK_ERRORS(); +} diff --git a/models/LEO/model/pointnetpp/_version.py b/models/LEO/model/pointnetpp/_version.py new file mode 100755 index 0000000..4eb28e3 --- /dev/null +++ b/models/LEO/model/pointnetpp/_version.py @@ -0,0 +1 @@ +__version__ = '3.0.0' diff --git a/models/LEO/model/pointnetpp/pointnet2_modules.py b/models/LEO/model/pointnetpp/pointnet2_modules.py new file mode 100755 index 0000000..2631d57 --- /dev/null +++ b/models/LEO/model/pointnetpp/pointnet2_modules.py @@ -0,0 +1,526 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +"""Pointnet2 layers. + +Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch +Extended with the following: +1. Uniform sampling in each local region (sample_uniformly) +2. Return sampled points indices to support votenet. +""" +import os +import sys + +import torch +import torch.nn as nn +import torch.nn.functional as F + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +from typing import List + +import pointnet2_utils +import pytorch_utils as pt_utils + + +class _PointnetSAModuleBase(nn.Module): + + def __init__(self): + super().__init__() + self.npoint = None + self.groupers = None + self.mlps = None + + def forward(self, + xyz: torch.Tensor, + features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, N, C) tensor of the descriptors of the the features + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors + """ + + new_features_list = [] + + xyz_flipped = xyz.transpose(1, 2).contiguous() + new_xyz = pointnet2_utils.gather_operation( + xyz_flipped, pointnet2_utils.furthest_point_sample( + xyz, self.npoint)).transpose( + 1, 2).contiguous() if self.npoint is not None else None + + for i in range(len(self.groupers)): + new_features = self.groupers[i]( + xyz, new_xyz, features) # (B, C, npoint, nsample) + + new_features = self.mlps[i]( + new_features) # (B, mlp[-1], npoint, nsample) + new_features = F.max_pool2d(new_features, + kernel_size=[ + 1, new_features.size(3) + ]) # (B, mlp[-1], npoint, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1) + + +class PointnetSAModuleMSG(_PointnetSAModuleBase): + r"""Pointnet set abstrction layer with multiscale grouping + + Parameters + ---------- + npoint : int + Number of features + radii : list of float32 + list of radii to group with + nsamples : list of int32 + Number of samples in each ball query + mlps : list of list of int32 + Spec of the pointnet before the global max_pool for each scale + bn : bool + Use batchnorm + """ + + def __init__(self, + *, + npoint: int, + radii: List[float], + nsamples: List[int], + mlps: List[List[int]], + bn: bool = True, + use_xyz: bool = True, + sample_uniformly: bool = False): + super().__init__() + + assert len(radii) == len(nsamples) == len(mlps) + + self.npoint = npoint 
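+        # One grouper and one shared MLP per radius: each scale runs its own
+        # ball query around the same FPS centers, and forward() concatenates
+        # the per-scale features along the channel dimension.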
+ self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup(radius, + nsample, + use_xyz=use_xyz, + sample_uniformly=sample_uniformly + ) + if npoint is not None else pointnet2_utils.GroupAll(use_xyz)) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) + + +class PointnetSAModule(PointnetSAModuleMSG): + r"""Pointnet set abstrction layer + + Parameters + ---------- + npoint : int + Number of features + radius : float + Radius of ball + nsample : int + Number of samples in the ball query + mlp : list + Spec of the pointnet before the global max_pool + bn : bool + Use batchnorm + """ + + def __init__(self, + *, + mlp: List[int], + npoint: int = None, + radius: float = None, + nsample: int = None, + bn: bool = True, + use_xyz: bool = True): + super().__init__(mlps=[mlp], + npoint=npoint, + radii=[radius], + nsamples=[nsample], + bn=bn, + use_xyz=use_xyz) + + +class PointnetSAModuleVotes(nn.Module): + """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG with + extra support for returning point indices for getting their GT votes.""" + + def __init__( + self, + *, + mlp: List[int], + npoint: int = None, + radius: float = None, + nsample: int = None, + bn: bool = True, + use_xyz: bool = True, + pooling: str = 'max', + sigma: float = None, # for RBF pooling + normalize_xyz: bool = False, # noramlize local XYZ with radius + sample_uniformly: bool = False, + ret_unique_cnt: bool = False): + super().__init__() + + self.npoint = npoint + self.radius = radius + self.nsample = nsample + self.pooling = pooling + self.mlp_module = None + self.use_xyz = use_xyz + self.sigma = sigma + if self.sigma is None: + self.sigma = self.radius / 2 + self.normalize_xyz = normalize_xyz + self.ret_unique_cnt = ret_unique_cnt + + if npoint is not None: + self.grouper = pointnet2_utils.QueryAndGroup( + radius, + nsample, + use_xyz=use_xyz, + ret_grouped_xyz=True, + normalize_xyz=normalize_xyz, + sample_uniformly=sample_uniformly, + ret_unique_cnt=ret_unique_cnt) + else: + self.grouper = pointnet2_utils.GroupAll(use_xyz, + ret_grouped_xyz=True) + + mlp_spec = mlp + if use_xyz and len(mlp_spec) > 0: + mlp_spec[0] += 3 + self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn) + + def forward(self, + xyz: torch.Tensor, + features: torch.Tensor = None, + inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, C, N) tensor of the descriptors of the the features + inds : torch.Tensor + (B, npoint) tensor that stores index to the xyz points (values in 0-N-1) + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors + inds: torch.Tensor + (B, npoint) tensor of the inds + """ + + xyz_flipped = xyz.transpose(1, 2).contiguous() + if inds is None: + inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint) + else: + assert (inds.shape[1] == self.npoint) + new_xyz = pointnet2_utils.gather_operation( + xyz_flipped, inds).transpose( + 1, 2).contiguous() if self.npoint is not None else None + + if not self.ret_unique_cnt: + grouped_features, grouped_xyz = self.grouper( + xyz, new_xyz, features) # (B, C, npoint, nsample) + else: + grouped_features, 
grouped_xyz, unique_cnt = self.grouper(
+                xyz, new_xyz, features
+            )  # (B, C, npoint, nsample), (B, 3, npoint, nsample), (B, npoint)
+
+        new_features = self.mlp_module(
+            grouped_features)  # (B, mlp[-1], npoint, nsample)
+        if self.pooling == 'max':
+            new_features = F.max_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+        elif self.pooling == 'avg':
+            new_features = F.avg_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+        elif self.pooling == 'rbf':
+            # Use radial basis function kernel for weighted sum of features
+            # (normalized by nsample and sigma)
+            # Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
+            rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1, keepdim=False) /
+                            (self.sigma**2) / 2)  # (B, npoint, nsample)
+            new_features = torch.sum(
+                new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(
+                    self.nsample)  # (B, mlp[-1], npoint, 1)
+        new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
+
+        if not self.ret_unique_cnt:
+            return new_xyz, new_features, inds
+        else:
+            return new_xyz, new_features, inds, unique_cnt
+
+
+class PointnetSAModuleMSGVotes(nn.Module):
+    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG with
+    extra support for returning point indices for getting their GT votes."""
+
+    def __init__(self,
+                 *,
+                 mlps: List[List[int]],
+                 npoint: int,
+                 radii: List[float],
+                 nsamples: List[int],
+                 bn: bool = True,
+                 use_xyz: bool = True,
+                 sample_uniformly: bool = False):
+        super().__init__()
+
+        assert (len(mlps) == len(nsamples) == len(radii))
+
+        self.npoint = npoint
+        self.groupers = nn.ModuleList()
+        self.mlps = nn.ModuleList()
+        for i in range(len(radii)):
+            radius = radii[i]
+            nsample = nsamples[i]
+            self.groupers.append(
+                pointnet2_utils.QueryAndGroup(radius,
+                                              nsample,
+                                              use_xyz=use_xyz,
+                                              sample_uniformly=sample_uniformly
+                                              )
+                if npoint is not None else pointnet2_utils.GroupAll(use_xyz))
+            mlp_spec = mlps[i]
+            if use_xyz:
+                mlp_spec[0] += 3
+
+            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
+
+    def forward(self,
+                xyz: torch.Tensor,
+                features: torch.Tensor = None,
+                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            (B, N, 3) tensor of the xyz coordinates of the features
+        features : torch.Tensor
+            (B, C, N) tensor of the descriptors of the features
+        inds : torch.Tensor
+            (B, npoint) tensor that stores index to the xyz points (values in 0..N-1)
+
+        Returns
+        -------
+        new_xyz : torch.Tensor
+            (B, npoint, 3) tensor of the new features' xyz
+        new_features : torch.Tensor
+            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
+        inds : torch.Tensor
+            (B, npoint) tensor of the inds
+        """
+        new_features_list = []
+
+        xyz_flipped = xyz.transpose(1, 2).contiguous()
+        if inds is None:
+            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
+        new_xyz = pointnet2_utils.gather_operation(
+            xyz_flipped, inds).transpose(
+                1, 2).contiguous() if self.npoint is not None else None
+
+        for i in range(len(self.groupers)):
+            new_features = self.groupers[i](
+                xyz, new_xyz, features)  # (B, C, npoint, nsample)
+            new_features = self.mlps[i](
+                new_features)  # (B, mlp[-1], npoint, nsample)
+            new_features = F.max_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
+
+            new_features_list.append(new_features)
+
+        return new_xyz, torch.cat(new_features_list, dim=1), inds
+
+
+class PointnetFPModule(nn.Module):
+    r"""Propagates the features of one set to another
+
+    Parameters
+    ----------
+    mlp : list
+        Pointnet module parameters
+    bn : bool
+        Use batchnorm
+    """
+
+    def __init__(self, *, mlp: List[int], bn: bool = True):
+        super().__init__()
+        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
+
+    def forward(self, unknown: torch.Tensor, known: torch.Tensor,
+                unknow_feats: torch.Tensor,
+                known_feats: torch.Tensor) -> torch.Tensor:
+        r"""
+        Parameters
+        ----------
+        unknown : torch.Tensor
+            (B, n, 3) tensor of the xyz positions of the unknown features
+        known : torch.Tensor
+            (B, m, 3) tensor of the xyz positions of the known features
+        unknow_feats : torch.Tensor
+            (B, C1, n) tensor of the features to be propagated to
+        known_feats : torch.Tensor
+            (B, C2, m) tensor of features to be propagated
+
+        Returns
+        -------
+        new_features : torch.Tensor
+            (B, mlp[-1], n) tensor of the features of the unknown features
+        """
+
+        if known is not None:
+            dist, idx = pointnet2_utils.three_nn(unknown, known)
+            dist_recip = 1.0 / (dist + 1e-8)
+            norm = torch.sum(dist_recip, dim=2, keepdim=True)
+            weight = dist_recip / norm
+
+            interpolated_feats = pointnet2_utils.three_interpolate(
+                known_feats, idx, weight)
+        else:
+            interpolated_feats = known_feats.expand(*known_feats.size()[0:2],
+                                                    unknown.size(1))
+
+        if unknow_feats is not None:
+            new_features = torch.cat([interpolated_feats, unknow_feats],
+                                     dim=1)  # (B, C2 + C1, n)
+        else:
+            new_features = interpolated_feats
+
+        new_features = new_features.unsqueeze(-1)
+        new_features = self.mlp(new_features)
+
+        return new_features.squeeze(-1)
+
+
+class PointnetLFPModuleMSG(nn.Module):
+    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG; a
+    learnable feature propagation layer."""
+
+    def __init__(self,
+                 *,
+                 mlps: List[List[int]],
+                 radii: List[float],
+                 nsamples: List[int],
+                 post_mlp: List[int],
+                 bn: bool = True,
+                 use_xyz: bool = True,
+                 sample_uniformly: bool = False):
+        super().__init__()
+
+        assert (len(mlps) == len(nsamples) == len(radii))
+
+        self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
+
+        self.groupers = nn.ModuleList()
+        self.mlps = nn.ModuleList()
+        for i in range(len(radii)):
+            radius = radii[i]
+            nsample = nsamples[i]
+            self.groupers.append(
+                pointnet2_utils.QueryAndGroup(
+                    radius,
+                    nsample,
+                    use_xyz=use_xyz,
+                    sample_uniformly=sample_uniformly))
+            mlp_spec = mlps[i]
+            if use_xyz:
+                mlp_spec[0] += 3
+
+            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
+
+    def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
+                features2: torch.Tensor,
+                features1: torch.Tensor) -> torch.Tensor:
+        r""" Propagate features from xyz1 to xyz2.
+ Parameters + ---------- + xyz2 : torch.Tensor + (B, N2, 3) tensor of the xyz coordinates of the features + xyz1 : torch.Tensor + (B, N1, 3) tensor of the xyz coordinates of the features + features2 : torch.Tensor + (B, C2, N2) tensor of the descriptors of the the features + features1 : torch.Tensor + (B, C1, N1) tensor of the descriptors of the the features + + Returns + ------- + new_features1 : torch.Tensor + (B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors + """ + new_features_list = [] + + for i in range(len(self.groupers)): + new_features = self.groupers[i](xyz1, xyz2, + features1) # (B, C1, N2, nsample) + new_features = self.mlps[i]( + new_features) # (B, mlp[-1], N2, nsample) + new_features = F.max_pool2d(new_features, + kernel_size=[1, + new_features.size(3) + ]) # (B, mlp[-1], N2, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], N2) + + if features2 is not None: + new_features = torch.cat([new_features, features2], + dim=1) #(B, mlp[-1] + C2, N2) + + new_features = new_features.unsqueeze(-1) + new_features = self.post_mlp(new_features) + + new_features_list.append(new_features) + + return torch.cat(new_features_list, dim=1).squeeze(-1) + + +if __name__ == '__main__': + from torch.autograd import Variable + torch.manual_seed(1) + torch.cuda.manual_seed_all(1) + xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True) + xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True) + + test_module = PointnetSAModuleMSG(npoint=2, + radii=[5.0, 10.0], + nsamples=[6, 3], + mlps=[[9, 3], [9, 6]]) + test_module.cuda() + print(test_module(xyz, xyz_feats)) + + for _ in range(1): + _, new_features = test_module(xyz, xyz_feats) + new_features.backward( + torch.cuda.FloatTensor(*new_features.size()).fill_(1)) + print(new_features) + print(xyz.grad) diff --git a/models/LEO/model/pointnetpp/pointnet2_test.py b/models/LEO/model/pointnetpp/pointnet2_test.py new file mode 100755 index 0000000..6c7335b --- /dev/null +++ b/models/LEO/model/pointnetpp/pointnet2_test.py @@ -0,0 +1,38 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +"""Testing customized ops.""" + +import os +import sys + +import numpy as np +import torch +from torch.autograd import gradcheck + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +import pointnet2_utils + + +def test_interpolation_grad(): + batch_size = 1 + feat_dim = 2 + m = 4 + feats = torch.randn(batch_size, feat_dim, m, + requires_grad=True).float().cuda() + + def interpolate_func(inputs): + idx = torch.from_numpy(np.array([[[0, 1, 2], [1, 2, 3]]])).int().cuda() + weight = torch.from_numpy(np.array([[[1, 1, 1], [2, 2, + 2]]])).float().cuda() + interpolated_feats = pointnet2_utils.three_interpolate( + inputs, idx, weight) + return interpolated_feats + + assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1)) + + +if __name__ == '__main__': + test_interpolation_grad() diff --git a/models/LEO/model/pointnetpp/pointnet2_utils.py b/models/LEO/model/pointnetpp/pointnet2_utils.py new file mode 100755 index 0000000..13bf48d --- /dev/null +++ b/models/LEO/model/pointnetpp/pointnet2_utils.py @@ -0,0 +1,427 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
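+# The autograd Functions below wrap the raw `_ext` kernels. A minimal usage
+# sketch (shapes as documented on each Function; assumes CUDA float tensors):
+#   inds = furthest_point_sample(xyz, npoint)                    # (B, npoint)
+#   new_xyz = gather_operation(xyz.transpose(1, 2).contiguous(),
+#                              inds).transpose(1, 2)             # (B, npoint, 3)
+#   idx = ball_query(radius, nsample, xyz, new_xyz)              # (B, npoint, nsample)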
+"""Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals, with_statement) + +import builtins + +import model.pointnetpp.pytorch_utils as pt_utils +import torch +import torch.nn as nn +from torch.autograd import Function + +try: + import pointnet2._ext as _ext +except ImportError: + if not getattr(builtins, '__POINTNET2_SETUP__', False): + raise ImportError( + 'Could not import _ext module.\n' + 'Please see the setup instructions in the README: ' + 'https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst' + ) + +if False: + # Workaround for type hints without depending on the `typing` module + from typing import * + + +class RandomDropout(nn.Module): + + def __init__(self, p=0.5, inplace=False): + super(RandomDropout, self).__init__() + self.p = p + self.inplace = inplace + + def forward(self, X): + theta = torch.Tensor(1).uniform_(0, self.p)[0] + return pt_utils.feature_dropout_no_scaling(X, theta, self.train, + self.inplace) + + +class FurthestPointSampling(Function): + + @staticmethod + def forward(ctx, xyz, npoint): + # type: (Any, torch.Tensor, int) -> torch.Tensor + r""" + Uses iterative furthest point sampling to select a set of npoint features that have the largest + minimum distance + + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor where N > npoint + npoint : int32 + number of features in the sampled set + + Returns + ------- + torch.Tensor + (B, npoint) tensor containing the set + """ + fps_inds = _ext.furthest_point_sampling(xyz, npoint) + ctx.mark_non_differentiable(fps_inds) + return fps_inds + + @staticmethod + def backward(xyz, a=None): + return None, None + + +furthest_point_sample = FurthestPointSampling.apply + + +class GatherOperation(Function): + + @staticmethod + def forward(ctx, features, idx): + # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + features : torch.Tensor + (B, C, N) tensor + + idx : torch.Tensor + (B, npoint) tensor of the features to gather + + Returns + ------- + torch.Tensor + (B, C, npoint) tensor + """ + + _, C, N = features.size() + + ctx.for_backwards = (idx, C, N) + + return _ext.gather_points(features, idx) + + @staticmethod + def backward(ctx, grad_out): + idx, C, N = ctx.for_backwards + + grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N) + return grad_features, None + + +gather_operation = GatherOperation.apply + + +class ThreeNN(Function): + + @staticmethod + def forward(ctx, unknown, known): + # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + r""" + Find the three nearest neighbors of unknown in known + Parameters + ---------- + unknown : torch.Tensor + (B, n, 3) tensor of known features + known : torch.Tensor + (B, m, 3) tensor of unknown features + + Returns + ------- + dist : torch.Tensor + (B, n, 3) l2 distance to the three nearest neighbors + idx : torch.Tensor + (B, n, 3) index of 3 nearest neighbors + """ + dist2, idx = _ext.three_nn(unknown, known) + + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply + + +class ThreeInterpolate(Function): + + @staticmethod + def forward(ctx, features, idx, weight): + # type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor + r""" + Performs weight linear interpolation on 3 features + Parameters + ---------- + features : torch.Tensor + (B, c, m) Features descriptors to be 
interpolated from + idx : torch.Tensor + (B, n, 3) three nearest neighbors of the target features in features + weight : torch.Tensor + (B, n, 3) weights + + Returns + ------- + torch.Tensor + (B, c, n) tensor of the interpolated features + """ + B, c, m = features.size() + n = idx.size(1) + + ctx.three_interpolate_for_backward = (idx, weight, m) + + return _ext.three_interpolate(features, idx, weight) + + @staticmethod + def backward(ctx, grad_out): + # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor] + r""" + Parameters + ---------- + grad_out : torch.Tensor + (B, c, n) tensor with gradients of ouputs + + Returns + ------- + grad_features : torch.Tensor + (B, c, m) tensor with gradients of features + + None + + None + """ + idx, weight, m = ctx.three_interpolate_for_backward + + grad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx, + weight, m) + + return grad_features, None, None + + +three_interpolate = ThreeInterpolate.apply + + +class GroupingOperation(Function): + + @staticmethod + def forward(ctx, features, idx): + # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + features : torch.Tensor + (B, C, N) tensor of features to group + idx : torch.Tensor + (B, npoint, nsample) tensor containing the indicies of features to group with + + Returns + ------- + torch.Tensor + (B, C, npoint, nsample) tensor + """ + B, nfeatures, nsample = idx.size() + _, C, N = features.size() + + ctx.for_backwards = (idx, N) + + return _ext.group_points(features, idx) + + @staticmethod + def backward(ctx, grad_out): + # type: (Any, torch.tensor) -> Tuple[torch.Tensor, torch.Tensor] + r""" + + Parameters + ---------- + grad_out : torch.Tensor + (B, C, npoint, nsample) tensor of the gradients of the output from forward + + Returns + ------- + torch.Tensor + (B, C, N) gradient of the features + None + """ + idx, N = ctx.for_backwards + + grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N) + + return grad_features, None + + +grouping_operation = GroupingOperation.apply + + +class BallQuery(Function): + + @staticmethod + def forward(ctx, radius, nsample, xyz, new_xyz): + # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + radius : float + radius of the balls + nsample : int + maximum number of features in the balls + xyz : torch.Tensor + (B, N, 3) xyz coordinates of the features + new_xyz : torch.Tensor + (B, npoint, 3) centers of the ball query + + Returns + ------- + torch.Tensor + (B, npoint, nsample) tensor with the indicies of the features that form the query balls + """ + inds = _ext.ball_query(new_xyz, xyz, radius, nsample) + ctx.mark_non_differentiable(inds) + return inds + + @staticmethod + def backward(ctx, a=None): + return None, None, None, None + + +ball_query = BallQuery.apply + + +class QueryAndGroup(nn.Module): + r""" + Groups with a ball query of radius + + Parameters + --------- + radius : float32 + Radius of ball + nsample : int32 + Maximum number of features to gather in the ball + """ + + def __init__(self, + radius, + nsample, + use_xyz=True, + ret_grouped_xyz=False, + normalize_xyz=False, + sample_uniformly=False, + ret_unique_cnt=False): + # type: (QueryAndGroup, float, int, bool) -> None + super(QueryAndGroup, self).__init__() + self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz + self.ret_grouped_xyz = ret_grouped_xyz + self.normalize_xyz = normalize_xyz + self.sample_uniformly = sample_uniformly + 
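+        # ret_unique_cnt reports how many distinct neighbors each ball query
+        # found; it is only computed inside the uniform re-sampling loop in
+        # forward(), hence the assert below tying it to sample_uniformly.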
self.ret_unique_cnt = ret_unique_cnt + if self.ret_unique_cnt: + assert (self.sample_uniformly) + + def forward(self, xyz, new_xyz, features=None): + # type: (QueryAndGroup, torch.Tensor. torch.Tensor, torch.Tensor) -> Tuple[Torch.Tensor] + r""" + Parameters + ---------- + xyz : torch.Tensor + xyz coordinates of the features (B, N, 3) + new_xyz : torch.Tensor + centriods (B, npoint, 3) + features : torch.Tensor + Descriptors of the features (B, C, N) + + Returns + ------- + new_features : torch.Tensor + (B, 3 + C, npoint, nsample) tensor + """ + idx = ball_query(self.radius, self.nsample, xyz, new_xyz) + + if self.sample_uniformly: + unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) + for i_batch in range(idx.shape[0]): + for i_region in range(idx.shape[1]): + unique_ind = torch.unique(idx[i_batch, i_region, :]) + num_unique = unique_ind.shape[0] + unique_cnt[i_batch, i_region] = num_unique + sample_ind = torch.randint(0, + num_unique, + (self.nsample - num_unique, ), + dtype=torch.long) + all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) + idx[i_batch, i_region, :] = all_ind + + xyz_trans = xyz.transpose(1, 2).contiguous() + grouped_xyz = grouping_operation(xyz_trans, + idx) # (B, 3, npoint, nsample) + grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) + if self.normalize_xyz: + grouped_xyz /= self.radius + + if features is not None: + grouped_features = grouping_operation(features, idx) + if self.use_xyz: + new_features = torch.cat([grouped_xyz, grouped_features], + dim=1) # (B, C + 3, npoint, nsample) + else: + new_features = grouped_features + else: + assert (self.use_xyz + ), 'Cannot have not features and not use xyz as a feature!' + new_features = grouped_xyz + + ret = [new_features] + if self.ret_grouped_xyz: + ret.append(grouped_xyz) + if self.ret_unique_cnt: + ret.append(unique_cnt) + if len(ret) == 1: + return ret[0] + else: + return tuple(ret) + + +class GroupAll(nn.Module): + r""" + Groups all features + + Parameters + --------- + """ + + def __init__(self, use_xyz=True, ret_grouped_xyz=False): + # type: (GroupAll, bool) -> None + super(GroupAll, self).__init__() + self.use_xyz = use_xyz + + def forward(self, xyz, new_xyz, features=None): + # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor] + r""" + Parameters + ---------- + xyz : torch.Tensor + xyz coordinates of the features (B, N, 3) + new_xyz : torch.Tensor + Ignored + features : torch.Tensor + Descriptors of the features (B, C, N) + + Returns + ------- + new_features : torch.Tensor + (B, C + 3, 1, N) tensor + """ + + grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) + if features is not None: + grouped_features = features.unsqueeze(2) + if self.use_xyz: + new_features = torch.cat([grouped_xyz, grouped_features], + dim=1) # (B, 3 + C, 1, N) + else: + new_features = grouped_features + else: + new_features = grouped_xyz + + return new_features diff --git a/models/LEO/model/pointnetpp/pointnetpp.py b/models/LEO/model/pointnetpp/pointnetpp.py new file mode 100755 index 0000000..047da0b --- /dev/null +++ b/models/LEO/model/pointnetpp/pointnetpp.py @@ -0,0 +1,64 @@ +import torch.nn as nn +from model.pointnetpp.pointnet2_modules import PointnetSAModule + + +def break_up_pc(pc): + """Split the pointcloud into xyz positions and features tensors. 
This + method is taken from the VoteNet codebase + (https://github.com/facebookresearch/votenet) + + @param pc: pointcloud [N, 3 + C] + :return: the xyz tensor and the feature tensor + """ + xyz = pc[..., 0:3].contiguous() + features = (pc[..., 3:].transpose(1, 2).contiguous() + if pc.size(-1) > 3 else None) + return xyz, features + + +class PointNetPP(nn.Module): + """Pointnet++ encoder. + + For the hyper-parameters, please refer to the paper + (https://arxiv.org/abs/1706.02413) + """ + + def __init__(self, + sa_n_points: list, + sa_n_samples: list, + sa_radii: list, + sa_mlps: list, + bn=True, + use_xyz=True): + super().__init__() + + n_sa = len(sa_n_points) + if not (n_sa == len(sa_n_samples) == len(sa_radii) == len(sa_mlps)): + raise ValueError('Lengths of the given hyper-parameters are not compatible') + + self.encoder = nn.ModuleList() + + for i in range(n_sa): + self.encoder.append( + PointnetSAModule( + npoint=sa_n_points[i], + nsample=sa_n_samples[i], + radius=sa_radii[i], + mlp=sa_mlps[i], + bn=bn, + use_xyz=use_xyz, + )) + + out_n_points = sa_n_points[-1] if sa_n_points[-1] is not None else 1 + self.out_dim = sa_mlps[-1][-1] + self.fc = nn.Linear(out_n_points * sa_mlps[-1][-1], self.out_dim) + + def forward(self, features): + """ + @param features: B x N_objects x N_Points x 3 + C + """ + xyz, features = break_up_pc(features) + for i in range(len(self.encoder)): + xyz, features = self.encoder[i](xyz, features) + + return self.fc(features.view(features.size(0), -1)) diff --git a/models/LEO/model/pointnetpp/pytorch_utils.py b/models/LEO/model/pointnetpp/pytorch_utils.py new file mode 100755 index 0000000..7923b68 --- /dev/null +++ b/models/LEO/model/pointnetpp/pytorch_utils.py @@ -0,0 +1,273 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree.
+"""Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch.""" +from typing import List, Tuple + +import torch +import torch.nn as nn + + +class SharedMLP(nn.Sequential): + + def __init__(self, + args: List[int], + *, + bn: bool = False, + activation=nn.ReLU(inplace=True), + preact: bool = False, + first: bool = False, + name: str = ''): + super().__init__() + + for i in range(len(args) - 1): + self.add_module( + name + 'layer{}'.format(i), + Conv2d(args[i], + args[i + 1], + bn=(not first or not preact or (i != 0)) and bn, + activation=activation if + (not first or not preact or (i != 0)) else None, + preact=preact)) + + +class _BNBase(nn.Sequential): + + def __init__(self, in_size, batch_norm=None, name=''): + super().__init__() + self.add_module(name + 'bn', batch_norm(in_size)) + + nn.init.constant_(self[0].weight, 1.0) + nn.init.constant_(self[0].bias, 0) + + +class BatchNorm1d(_BNBase): + + def __init__(self, in_size: int, *, name: str = ''): + super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name) + + +class BatchNorm2d(_BNBase): + + def __init__(self, in_size: int, name: str = ''): + super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name) + + +class BatchNorm3d(_BNBase): + + def __init__(self, in_size: int, name: str = ''): + super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name) + + +class _ConvBase(nn.Sequential): + + def __init__(self, + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=None, + batch_norm=None, + bias=True, + preact=False, + name=''): + super().__init__() + + bias = bias and (not bn) + conv_unit = conv(in_size, + out_size, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=bias) + init(conv_unit.weight) + if bias: + nn.init.constant_(conv_unit.bias, 0) + + if bn: + if not preact: + bn_unit = batch_norm(out_size) + else: + bn_unit = batch_norm(in_size) + + if preact: + if bn: + self.add_module(name + 'bn', bn_unit) + + if activation is not None: + self.add_module(name + 'activation', activation) + + self.add_module(name + 'conv', conv_unit) + + if not preact: + if bn: + self.add_module(name + 'bn', bn_unit) + + if activation is not None: + self.add_module(name + 'activation', activation) + + +class Conv1d(_ConvBase): + + def __init__(self, + in_size: int, + out_size: int, + *, + kernel_size: int = 1, + stride: int = 1, + padding: int = 0, + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = ''): + super().__init__(in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv1d, + batch_norm=BatchNorm1d, + bias=bias, + preact=preact, + name=name) + + +class Conv2d(_ConvBase): + + def __init__(self, + in_size: int, + out_size: int, + *, + kernel_size: Tuple[int, int] = (1, 1), + stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = ''): + super().__init__(in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv2d, + batch_norm=BatchNorm2d, + bias=bias, + preact=preact, + name=name) + + +class Conv3d(_ConvBase): + + def __init__(self, + in_size: int, + out_size: int, + *, + kernel_size: Tuple[int, int, int] = (1, 1, 1), + stride: Tuple[int, int, int] = (1, 1, 1), + padding: Tuple[int, int, int] = (0, 0, 0), + 
activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = ''): + super().__init__(in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv3d, + batch_norm=BatchNorm3d, + bias=bias, + preact=preact, + name=name) + + +class FC(nn.Sequential): + + def __init__(self, + in_size: int, + out_size: int, + *, + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=None, + preact: bool = False, + name: str = ''): + super().__init__() + + fc = nn.Linear(in_size, out_size, bias=not bn) + if init is not None: + init(fc.weight) + if not bn: + nn.init.constant_(fc.bias, 0) + + if preact: + if bn: + self.add_module(name + 'bn', BatchNorm1d(in_size)) + + if activation is not None: + self.add_module(name + 'activation', activation) + + self.add_module(name + 'fc', fc) + + if not preact: + if bn: + self.add_module(name + 'bn', BatchNorm1d(out_size)) + + if activation is not None: + self.add_module(name + 'activation', activation) + + +def set_bn_momentum_default(bn_momentum): + + def fn(m): + if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): + m.momentum = bn_momentum + + return fn + + +class BNMomentumScheduler(object): + + def __init__(self, + model, + bn_lambda, + last_epoch=-1, + setter=set_bn_momentum_default): + if not isinstance(model, nn.Module): + raise RuntimeError("Class '{}' is not a PyTorch nn Module".format( + type(model).__name__)) + + self.model = model + self.setter = setter + self.lmbd = bn_lambda + + self.step(last_epoch + 1) + self.last_epoch = last_epoch + + def step(self, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + + self.last_epoch = epoch + self.model.apply(self.setter(self.lmbd(epoch))) diff --git a/models/LEO/model/pointnetpp/setup.py b/models/LEO/model/pointnetpp/setup.py new file mode 100755 index 0000000..c806d9d --- /dev/null +++ b/models/LEO/model/pointnetpp/setup.py @@ -0,0 +1,38 @@ +import glob +import os +import os.path as osp + +from setuptools import find_packages, setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +_this_dir = osp.dirname(osp.abspath(__file__)) +_ext_src_root = '_ext_src' +_ext_sources = glob.glob('{}/src/*.cpp'.format(_ext_src_root)) + glob.glob( + '{}/src/*.cu'.format(_ext_src_root)) +_ext_headers = glob.glob('{}/include/*'.format(_ext_src_root)) + +requirements = ['torch>=1.4'] + +os.environ['TORCH_CUDA_ARCH_LIST'] = '3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5' + +exec(open('_version.py').read()) + +setup( + name='pointnet2', + version=__version__, + packages=find_packages(), + install_requires=requirements, + ext_modules=[ + CUDAExtension( + name='pointnet2._ext', + sources=_ext_sources, + extra_compile_args={ + 'cxx': ['-O3'], + 'nvcc': ['-O3', '-Xfatbin', '-compress-all'], + }, + include_dirs=[osp.join(_this_dir, _ext_src_root, 'include')], + ) + ], + cmdclass={'build_ext': BuildExtension}, + include_package_data=True, +) diff --git a/models/LEO/model/pointnext/cpp/__init__.py b/models/LEO/model/pointnext/cpp/__init__.py new file mode 100644 index 0000000..0ab5e18 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/__init__.py @@ -0,0 +1,6 @@ +""" +Author: PointNeXt + +""" + +from .pointnet2_batch import pointnet2_cuda diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/__init__.py b/models/LEO/model/pointnext/cpp/pointnet2_batch/__init__.py new file mode 100644 index 0000000..4ac6f15 --- /dev/null +++ 
b/models/LEO/model/pointnext/cpp/pointnet2_batch/__init__.py @@ -0,0 +1,3 @@ +import torch + +from . import pointnet2_batch_cuda as pointnet2_cuda diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/setup.py b/models/LEO/model/pointnext/cpp/pointnet2_batch/setup.py new file mode 100644 index 0000000..559543c --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/setup.py @@ -0,0 +1,22 @@ +from setuptools import setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +setup(name='pointnet2_cuda', + ext_modules=[ + CUDAExtension('pointnet2_batch_cuda', [ + 'src/pointnet2_api.cpp', + 'src/ball_query.cpp', + 'src/ball_query_gpu.cu', + 'src/group_points.cpp', + 'src/group_points_gpu.cu', + 'src/interpolate.cpp', + 'src/interpolate_gpu.cu', + 'src/sampling.cpp', + 'src/sampling_gpu.cu', + ], + extra_compile_args={ + 'cxx': ['-g'], + 'nvcc': ['-O2'] + }) + ], + cmdclass={'build_ext': BuildExtension}) diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query.cpp b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query.cpp new file mode 100644 index 0000000..302f8a1 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query.cpp @@ -0,0 +1,39 @@ +/* +batch version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + + +#include +#include +#include +#include +#include "ball_query_gpu.h" + +#define CHECK_CUDA(x) do { \ + if (!x.type().is_cuda()) { \ + fprintf(stderr, "%s must be CUDA tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_CONTIGUOUS(x) do { \ + if (!x.is_contiguous()) { \ + fprintf(stderr, "%s must be contiguous tensor at %s:%d\n", #x, __FILE__, __LINE__); \ + exit(-1); \ + } \ +} while (0) +#define CHECK_INPUT(x) CHECK_CUDA(x);CHECK_CONTIGUOUS(x) + + +int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, + at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor) { + CHECK_INPUT(new_xyz_tensor); + CHECK_INPUT(xyz_tensor); + const float *new_xyz = new_xyz_tensor.data(); + const float *xyz = xyz_tensor.data(); + int *idx = idx_tensor.data(); + + ball_query_kernel_launcher_fast(b, n, m, radius, nsample, new_xyz, xyz, idx); + return 1; +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query_gpu.cu b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query_gpu.cu new file mode 100644 index 0000000..870ca12 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query_gpu.cu @@ -0,0 +1,73 @@ +/* +batch version of ball query, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. 
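+ +Each CUDA thread handles one query center: it scans all N points, collects up to nsample neighbour indices within the given radius, and pads any unfilled slots with the first neighbour found.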
+*/ + +#include +#include +#include + +#include "ball_query_gpu.h" +#include "cuda_utils.h" + + +__global__ void ball_query_kernel_fast(int b, int n, int m, float radius, int nsample, + const float *__restrict__ new_xyz, const float *__restrict__ xyz, int *__restrict__ idx) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= m) return; + + new_xyz += bs_idx * m * 3 + pt_idx * 3; + xyz += bs_idx * n * 3; + idx += bs_idx * m * nsample + pt_idx * nsample; + + float radius2 = radius * radius; + float new_x = new_xyz[0]; + float new_y = new_xyz[1]; + float new_z = new_xyz[2]; + + int cnt = 0; + for (int k = 0; k < n; ++k) { + float x = xyz[k * 3 + 0]; + float y = xyz[k * 3 + 1]; + float z = xyz[k * 3 + 2]; + float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) + (new_z - z) * (new_z - z); + if (d2 < radius2){ + if (cnt == 0){ + for (int l = 0; l < nsample; ++l) { + idx[l] = k; + } + } + idx[cnt] = k; + ++cnt; + if (cnt >= nsample) break; + } + } +} + + +void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, \ + const float *new_xyz, const float *xyz, int *idx) { + // new_xyz: (B, M, 3) + // xyz: (B, N, 3) + // output: + // idx: (B, M, nsample) + + cudaError_t err; + + dim3 blocks(DIVUP(m, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + ball_query_kernel_fast<<>>(b, n, m, radius, nsample, new_xyz, xyz, idx); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query_gpu.h b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query_gpu.h new file mode 100644 index 0000000..7b18198 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/ball_query_gpu.h @@ -0,0 +1,15 @@ +#ifndef _BALL_QUERY_GPU_H +#define _BALL_QUERY_GPU_H + +#include +#include +#include +#include + +int ball_query_wrapper_fast(int b, int n, int m, float radius, int nsample, + at::Tensor new_xyz_tensor, at::Tensor xyz_tensor, at::Tensor idx_tensor); + +void ball_query_kernel_launcher_fast(int b, int n, int m, float radius, int nsample, + const float *xyz, const float *new_xyz, int *idx); + +#endif diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/cuda_utils.h b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/cuda_utils.h new file mode 100644 index 0000000..7fe2796 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/cuda_utils.h @@ -0,0 +1,15 @@ +#ifndef _CUDA_UTILS_H +#define _CUDA_UTILS_H + +#include + +#define TOTAL_THREADS 1024 +#define THREADS_PER_BLOCK 256 +#define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) + +inline int opt_n_threads(int work_size) { + const int pow_2 = std::log(static_cast(work_size)) / std::log(2.0); + + return max(min(1 << pow_2, TOTAL_THREADS), 1); +} +#endif diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points.cpp b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points.cpp new file mode 100644 index 0000000..a6b60d7 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points.cpp @@ -0,0 +1,35 @@ +/* +batch version of point grouping, modified from the original implementation of official PointNet++ codes. 
+Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + +#include +#include +#include +#include +#include "group_points_gpu.h" + +int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) +{ + + float *grad_points = grad_points_tensor.data(); + const int *idx = idx_tensor.data(); + const float *grad_out = grad_out_tensor.data(); + + group_points_grad_kernel_launcher_fast(b, c, n, npoints, nsample, grad_out, idx, grad_points); + return 1; +} + +int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, + at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor) +{ + + const float *points = points_tensor.data(); + const int *idx = idx_tensor.data(); + float *out = out_tensor.data(); + + group_points_kernel_launcher_fast(b, c, n, npoints, nsample, points, idx, out); + return 1; +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points_gpu.cu b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points_gpu.cu new file mode 100644 index 0000000..a13c0a3 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points_gpu.cu @@ -0,0 +1,92 @@ +/* +batch version of point grouping, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + +#include +#include + +#include "cuda_utils.h" +#include "group_points_gpu.h" + + +__global__ void group_points_grad_kernel_fast(int b, int c, int n, int npoints, int nsample, + const float *__restrict__ grad_out, const int *__restrict__ idx, float *__restrict__ grad_points) { + // grad_out: (B, C, npoints, nsample) + // idx: (B, npoints, nsample) + // output: + // grad_points: (B, C, N) + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int index = blockIdx.x * blockDim.x + threadIdx.x; + int pt_idx = index / nsample; + if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; + + int sample_idx = index % nsample; + grad_out += bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; + idx += bs_idx * npoints * nsample + pt_idx * nsample + sample_idx; + + atomicAdd(grad_points + bs_idx * c * n + c_idx * n + idx[0] , grad_out[0]); +} + +void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, + const float *grad_out, const int *idx, float *grad_points) { + // grad_out: (B, C, npoints, nsample) + // idx: (B, npoints, nsample) + // output: + // grad_points: (B, C, N) + cudaError_t err; + dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + group_points_grad_kernel_fast<<>>(b, c, n, npoints, nsample, grad_out, idx, grad_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void group_points_kernel_fast(int b, int c, int n, int npoints, int nsample, + const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { + // points: (B, C, N) + // idx: (B, npoints, nsample) + // output: + // out: (B, C, npoints, nsample) + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int index = blockIdx.x * blockDim.x + threadIdx.x; + int pt_idx = index / nsample; + if (bs_idx >= b || c_idx >= c || pt_idx >= npoints) return; + + int sample_idx = index % nsample; + + idx += bs_idx * npoints * nsample + pt_idx * nsample + 
sample_idx; + int in_idx = bs_idx * c * n + c_idx * n + idx[0]; + int out_idx = bs_idx * c * npoints * nsample + c_idx * npoints * nsample + pt_idx * nsample + sample_idx; + + out[out_idx] = points[in_idx]; +} + + +void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, + const float *points, const int *idx, float *out) { + // points: (B, C, N) + // idx: (B, npoints, nsample) + // output: + // out: (B, C, npoints, nsample) + cudaError_t err; + dim3 blocks(DIVUP(npoints * nsample, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + group_points_kernel_fast<<>>(b, c, n, npoints, nsample, points, idx, out); + // cudaDeviceSynchronize(); // for using printf in kernel function + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points_gpu.h b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points_gpu.h new file mode 100644 index 0000000..6eb7225 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/group_points_gpu.h @@ -0,0 +1,22 @@ +#ifndef _GROUP_POINTS_GPU_H +#define _GROUP_POINTS_GPU_H + +#include +#include +#include +#include + + +int group_points_wrapper_fast(int b, int c, int n, int npoints, int nsample, + at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); + +void group_points_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, + const float *points, const int *idx, float *out); + +int group_points_grad_wrapper_fast(int b, int c, int n, int npoints, int nsample, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); + +void group_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, int nsample, + const float *grad_out, const int *idx, float *grad_points); + +#endif diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate.cpp b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate.cpp new file mode 100644 index 0000000..ed59e01 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate.cpp @@ -0,0 +1,57 @@ +/* +batch version of point interpolation, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. 
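+ +These wrappers expose three_nn (brute-force 3-nearest-neighbour search) and three_interpolate (weighted interpolation over the three neighbour features, plus its gradient) to PyTorch.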
+*/ + + +#include +#include +#include +#include +#include +#include +#include +#include "interpolate_gpu.h" + + + + +void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, + at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor) { + const float *unknown = unknown_tensor.data(); + const float *known = known_tensor.data(); + float *dist2 = dist2_tensor.data(); + int *idx = idx_tensor.data(); + + three_nn_kernel_launcher_fast(b, n, m, unknown, known, dist2, idx); +} + + +void three_interpolate_wrapper_fast(int b, int c, int m, int n, + at::Tensor points_tensor, + at::Tensor idx_tensor, + at::Tensor weight_tensor, + at::Tensor out_tensor) { + + const float *points = points_tensor.data(); + const float *weight = weight_tensor.data(); + float *out = out_tensor.data(); + const int *idx = idx_tensor.data(); + + three_interpolate_kernel_launcher_fast(b, c, m, n, points, idx, weight, out); +} + +void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, + at::Tensor grad_out_tensor, + at::Tensor idx_tensor, + at::Tensor weight_tensor, + at::Tensor grad_points_tensor) { + + const float *grad_out = grad_out_tensor.data(); + const float *weight = weight_tensor.data(); + float *grad_points = grad_points_tensor.data(); + const int *idx = idx_tensor.data(); + + three_interpolate_grad_kernel_launcher_fast(b, c, n, m, grad_out, idx, weight, grad_points); +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate_gpu.cu b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate_gpu.cu new file mode 100644 index 0000000..d94e077 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate_gpu.cu @@ -0,0 +1,168 @@ +/* +batch version of point interpolation, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. 
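+ +For each target point i, three_interpolate computes out[b][c][i] = sum_k weight[b][i][k] * points[b][c][idx[b][i][k]] for k = 0..2; the backward kernel scatter-adds the weighted gradients into grad_points with atomicAdd.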
+*/ + + +#include +#include +#include + +#include "cuda_utils.h" +#include "interpolate_gpu.h" + + +__global__ void three_nn_kernel_fast(int b, int n, int m, const float *__restrict__ unknown, + const float *__restrict__ known, float *__restrict__ dist2, int *__restrict__ idx) { + // unknown: (B, N, 3) + // known: (B, M, 3) + // output: + // dist2: (B, N, 3) + // idx: (B, N, 3) + + int bs_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || pt_idx >= n) return; + + unknown += bs_idx * n * 3 + pt_idx * 3; + known += bs_idx * m * 3; + dist2 += bs_idx * n * 3 + pt_idx * 3; + idx += bs_idx * n * 3 + pt_idx * 3; + + float ux = unknown[0]; + float uy = unknown[1]; + float uz = unknown[2]; + + double best1 = 1e40, best2 = 1e40, best3 = 1e40; + int besti1 = 0, besti2 = 0, besti3 = 0; + for (int k = 0; k < m; ++k) { + float x = known[k * 3 + 0]; + float y = known[k * 3 + 1]; + float z = known[k * 3 + 2]; + float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z); + if (d < best1) { + best3 = best2; besti3 = besti2; + best2 = best1; besti2 = besti1; + best1 = d; besti1 = k; + } + else if (d < best2) { + best3 = best2; besti3 = besti2; + best2 = d; besti2 = k; + } + else if (d < best3) { + best3 = d; besti3 = k; + } + } + dist2[0] = best1; dist2[1] = best2; dist2[2] = best3; + idx[0] = besti1; idx[1] = besti2; idx[2] = besti3; +} + + +void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown, + const float *known, float *dist2, int *idx) { + // unknown: (B, N, 3) + // known: (B, M, 3) + // output: + // dist2: (B, N, 3) + // idx: (B, N, 3) + + cudaError_t err; + dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + three_nn_kernel_fast<<>>(b, n, m, unknown, known, dist2, idx); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void three_interpolate_kernel_fast(int b, int c, int m, int n, const float *__restrict__ points, + const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ out) { + // points: (B, C, M) + // idx: (B, N, 3) + // weight: (B, N, 3) + // output: + // out: (B, C, N) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + + if (bs_idx >= b || c_idx >= c || pt_idx >= n) return; + + weight += bs_idx * n * 3 + pt_idx * 3; + points += bs_idx * c * m + c_idx * m; + idx += bs_idx * n * 3 + pt_idx * 3; + out += bs_idx * c * n + c_idx * n; + + out[pt_idx] = weight[0] * points[idx[0]] + weight[1] * points[idx[1]] + weight[2] * points[idx[2]]; +} + +void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n, + const float *points, const int *idx, const float *weight, float *out) { + // points: (B, C, M) + // idx: (B, N, 3) + // weight: (B, N, 3) + // output: + // out: (B, C, N) + + cudaError_t err; + dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + three_interpolate_kernel_fast<<>>(b, c, m, n, points, idx, weight, out); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__global__ void three_interpolate_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out, + const int *__restrict__ idx, const float *__restrict__ weight, float *__restrict__ grad_points) 
{ + // grad_out: (B, C, N) + // weight: (B, N, 3) + // output: + // grad_points: (B, C, M) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + + if (bs_idx >= b || c_idx >= c || pt_idx >= n) return; + + grad_out += bs_idx * c * n + c_idx * n + pt_idx; + weight += bs_idx * n * 3 + pt_idx * 3; + grad_points += bs_idx * c * m + c_idx * m; + idx += bs_idx * n * 3 + pt_idx * 3; + + + atomicAdd(grad_points + idx[0], grad_out[0] * weight[0]); + atomicAdd(grad_points + idx[1], grad_out[0] * weight[1]); + atomicAdd(grad_points + idx[2], grad_out[0] * weight[2]); +} + +void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out, + const int *idx, const float *weight, float *grad_points) { + // grad_out: (B, C, N) + // weight: (B, N, 3) + // output: + // grad_points: (B, C, M) + + cudaError_t err; + dim3 blocks(DIVUP(n, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + three_interpolate_grad_kernel_fast<<>>(b, c, n, m, grad_out, idx, weight, grad_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate_gpu.h b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate_gpu.h new file mode 100644 index 0000000..19d41eb --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/interpolate_gpu.h @@ -0,0 +1,30 @@ +#ifndef _INTERPOLATE_GPU_H +#define _INTERPOLATE_GPU_H + +#include +#include +#include +#include + + +void three_nn_wrapper_fast(int b, int n, int m, at::Tensor unknown_tensor, + at::Tensor known_tensor, at::Tensor dist2_tensor, at::Tensor idx_tensor); + +void three_nn_kernel_launcher_fast(int b, int n, int m, const float *unknown, + const float *known, float *dist2, int *idx); + + +void three_interpolate_wrapper_fast(int b, int c, int m, int n, at::Tensor points_tensor, + at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor out_tensor); + +void three_interpolate_kernel_launcher_fast(int b, int c, int m, int n, + const float *points, const int *idx, const float *weight, float *out); + + +void three_interpolate_grad_wrapper_fast(int b, int c, int n, int m, at::Tensor grad_out_tensor, + at::Tensor idx_tensor, at::Tensor weight_tensor, at::Tensor grad_points_tensor); + +void three_interpolate_grad_kernel_launcher_fast(int b, int c, int n, int m, const float *grad_out, + const int *idx, const float *weight, float *grad_points); + +#endif diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/pointnet2_api.cpp b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/pointnet2_api.cpp new file mode 100644 index 0000000..10038bc --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/pointnet2_api.cpp @@ -0,0 +1,24 @@ +#include +#include + +#include "ball_query_gpu.h" +#include "group_points_gpu.h" +#include "sampling_gpu.h" +#include "interpolate_gpu.h" + + +PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) { + m.def("ball_query_wrapper", &ball_query_wrapper_fast, "ball_query_wrapper_fast"); + + m.def("group_points_wrapper", &group_points_wrapper_fast, "group_points_wrapper_fast"); + m.def("group_points_grad_wrapper", &group_points_grad_wrapper_fast, "group_points_grad_wrapper_fast"); + + m.def("gather_points_wrapper", &gather_points_wrapper_fast, "gather_points_wrapper_fast"); + m.def("gather_points_grad_wrapper", 
&gather_points_grad_wrapper_fast, "gather_points_grad_wrapper_fast"); + + m.def("furthest_point_sampling_wrapper", &furthest_point_sampling_wrapper, "furthest_point_sampling_wrapper"); + + m.def("three_nn_wrapper", &three_nn_wrapper_fast, "three_nn_wrapper_fast"); + m.def("three_interpolate_wrapper", &three_interpolate_wrapper_fast, "three_interpolate_wrapper_fast"); + m.def("three_interpolate_grad_wrapper", &three_interpolate_grad_wrapper_fast, "three_interpolate_grad_wrapper_fast"); +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling.cpp b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling.cpp new file mode 100644 index 0000000..30b1944 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling.cpp @@ -0,0 +1,48 @@ +/* +batch version of point sampling and gathering, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. +*/ + + +#include +#include +#include +#include "sampling_gpu.h" + + + + +int gather_points_wrapper_fast(int b, int c, int n, int npoints, + at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor){ + const float *points = points_tensor.data(); + const int *idx = idx_tensor.data(); + float *out = out_tensor.data(); + + gather_points_kernel_launcher_fast(b, c, n, npoints, points, idx, out); + return 1; +} + + +int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor) { + + const float *grad_out = grad_out_tensor.data(); + const int *idx = idx_tensor.data(); + float *grad_points = grad_points_tensor.data(); + + gather_points_grad_kernel_launcher_fast(b, c, n, npoints, grad_out, idx, grad_points); + return 1; +} + + +int furthest_point_sampling_wrapper(int b, int n, int m, + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor) { + + const float *points = points_tensor.data(); + float *temp = temp_tensor.data(); + int *idx = idx_tensor.data(); + + furthest_point_sampling_kernel_launcher(b, n, m, points, temp, idx); + return 1; +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling_gpu.cu b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling_gpu.cu new file mode 100644 index 0000000..203bd82 --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling_gpu.cu @@ -0,0 +1,260 @@ +/* +batch version of point sampling and gathering, modified from the original implementation of official PointNet++ codes. +Written by Shaoshuai Shi +All Rights Reserved 2018. 
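+ +furthest_point_sampling keeps each point's squared distance to the already-selected set in temp and runs a shared-memory tree reduction per iteration to pick the farthest remaining point.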
+*/ + + +#include +#include + +#include "cuda_utils.h" +#include "sampling_gpu.h" + + +__global__ void gather_points_kernel_fast(int b, int c, int n, int m, + const float *__restrict__ points, const int *__restrict__ idx, float *__restrict__ out) { + // points: (B, C, N) + // idx: (B, M) + // output: + // out: (B, C, M) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; + + out += bs_idx * c * m + c_idx * m + pt_idx; + idx += bs_idx * m + pt_idx; + points += bs_idx * c * n + c_idx * n; + out[0] = points[idx[0]]; +} + +void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, + const float *points, const int *idx, float *out) { + // points: (B, C, N) + // idx: (B, npoints) + // output: + // out: (B, C, npoints) + + cudaError_t err; + dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + gather_points_kernel_fast<<>>(b, c, n, npoints, points, idx, out); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + +__global__ void gather_points_grad_kernel_fast(int b, int c, int n, int m, const float *__restrict__ grad_out, + const int *__restrict__ idx, float *__restrict__ grad_points) { + // grad_out: (B, C, M) + // idx: (B, M) + // output: + // grad_points: (B, C, N) + + int bs_idx = blockIdx.z; + int c_idx = blockIdx.y; + int pt_idx = blockIdx.x * blockDim.x + threadIdx.x; + if (bs_idx >= b || c_idx >= c || pt_idx >= m) return; + + grad_out += bs_idx * c * m + c_idx * m + pt_idx; + idx += bs_idx * m + pt_idx; + grad_points += bs_idx * c * n + c_idx * n; + + atomicAdd(grad_points + idx[0], grad_out[0]); +} + +void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, + const float *grad_out, const int *idx, float *grad_points) { + // grad_out: (B, C, npoints) + // idx: (B, npoints) + // output: + // grad_points: (B, C, N) + + cudaError_t err; + dim3 blocks(DIVUP(npoints, THREADS_PER_BLOCK), c, b); // blockIdx.x(col), blockIdx.y(row) + dim3 threads(THREADS_PER_BLOCK); + + gather_points_grad_kernel_fast<<>>(b, c, n, npoints, grad_out, idx, grad_points); + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} + + +__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i, int idx1, int idx2){ + const float v1 = dists[idx1], v2 = dists[idx2]; + const int i1 = dists_i[idx1], i2 = dists_i[idx2]; + dists[idx1] = max(v1, v2); + dists_i[idx1] = v2 > v1 ? 
i2 : i1; +} + +template +__global__ void furthest_point_sampling_kernel(int b, int n, int m, + const float *__restrict__ dataset, float *__restrict__ temp, int *__restrict__ idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + if (m <= 0) return; + __shared__ float dists[block_size]; + __shared__ int dists_i[block_size]; + + int batch_index = blockIdx.x; + dataset += batch_index * n * 3; + temp += batch_index * n; + idxs += batch_index * m; + + int tid = threadIdx.x; + const int stride = block_size; + + int old = 0; + if (threadIdx.x == 0) + idxs[0] = old; + + __syncthreads(); + for (int j = 1; j < m; j++) { + int besti = 0; + float best = -1; + float x1 = dataset[old * 3 + 0]; + float y1 = dataset[old * 3 + 1]; + float z1 = dataset[old * 3 + 2]; + for (int k = tid; k < n; k += stride) { + float x2, y2, z2; + x2 = dataset[k * 3 + 0]; + y2 = dataset[k * 3 + 1]; + z2 = dataset[k * 3 + 2]; + // float mag = (x2 * x2) + (y2 * y2) + (z2 * z2); + // if (mag <= 1e-3) + // continue; + + float d = (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1); + float d2 = min(d, temp[k]); + temp[k] = d2; + besti = d2 > best ? k : besti; + best = d2 > best ? d2 : best; + } + dists[tid] = best; + dists_i[tid] = besti; + __syncthreads(); + + if (block_size >= 1024) { + if (tid < 512) { + __update(dists, dists_i, tid, tid + 512); + } + __syncthreads(); + } + + if (block_size >= 512) { + if (tid < 256) { + __update(dists, dists_i, tid, tid + 256); + } + __syncthreads(); + } + if (block_size >= 256) { + if (tid < 128) { + __update(dists, dists_i, tid, tid + 128); + } + __syncthreads(); + } + if (block_size >= 128) { + if (tid < 64) { + __update(dists, dists_i, tid, tid + 64); + } + __syncthreads(); + } + if (block_size >= 64) { + if (tid < 32) { + __update(dists, dists_i, tid, tid + 32); + } + __syncthreads(); + } + if (block_size >= 32) { + if (tid < 16) { + __update(dists, dists_i, tid, tid + 16); + } + __syncthreads(); + } + if (block_size >= 16) { + if (tid < 8) { + __update(dists, dists_i, tid, tid + 8); + } + __syncthreads(); + } + if (block_size >= 8) { + if (tid < 4) { + __update(dists, dists_i, tid, tid + 4); + } + __syncthreads(); + } + if (block_size >= 4) { + if (tid < 2) { + __update(dists, dists_i, tid, tid + 2); + } + __syncthreads(); + } + if (block_size >= 2) { + if (tid < 1) { + __update(dists, dists_i, tid, tid + 1); + } + __syncthreads(); + } + + old = dists_i[0]; + if (tid == 0) + idxs[j] = old; + } +} + +void furthest_point_sampling_kernel_launcher(int b, int n, int m, + const float *dataset, float *temp, int *idxs) { + // dataset: (B, N, 3) + // tmp: (B, N) + // output: + // idx: (B, M) + + cudaError_t err; + unsigned int n_threads = opt_n_threads(n); + + switch (n_threads) { + case 1024: + furthest_point_sampling_kernel<1024><<>>(b, n, m, dataset, temp, idxs); break; + case 512: + furthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); break; + case 256: + furthest_point_sampling_kernel<256><<>>(b, n, m, dataset, temp, idxs); break; + case 128: + furthest_point_sampling_kernel<128><<>>(b, n, m, dataset, temp, idxs); break; + case 64: + furthest_point_sampling_kernel<64><<>>(b, n, m, dataset, temp, idxs); break; + case 32: + furthest_point_sampling_kernel<32><<>>(b, n, m, dataset, temp, idxs); break; + case 16: + furthest_point_sampling_kernel<16><<>>(b, n, m, dataset, temp, idxs); break; + case 8: + furthest_point_sampling_kernel<8><<>>(b, n, m, dataset, temp, idxs); break; + case 4: + furthest_point_sampling_kernel<4><<>>(b, n, 
m, dataset, temp, idxs); break; + case 2: + furthest_point_sampling_kernel<2><<>>(b, n, m, dataset, temp, idxs); break; + case 1: + furthest_point_sampling_kernel<1><<>>(b, n, m, dataset, temp, idxs); break; + default: + furthest_point_sampling_kernel<512><<>>(b, n, m, dataset, temp, idxs); + } + + err = cudaGetLastError(); + if (cudaSuccess != err) { + fprintf(stderr, "CUDA kernel failed : %s\n", cudaGetErrorString(err)); + exit(-1); + } +} diff --git a/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling_gpu.h b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling_gpu.h new file mode 100644 index 0000000..034e04c --- /dev/null +++ b/models/LEO/model/pointnext/cpp/pointnet2_batch/src/sampling_gpu.h @@ -0,0 +1,29 @@ +#ifndef _SAMPLING_GPU_H +#define _SAMPLING_GPU_H + +#include +#include +#include + + +int gather_points_wrapper_fast(int b, int c, int n, int npoints, + at::Tensor points_tensor, at::Tensor idx_tensor, at::Tensor out_tensor); + +void gather_points_kernel_launcher_fast(int b, int c, int n, int npoints, + const float *points, const int *idx, float *out); + + +int gather_points_grad_wrapper_fast(int b, int c, int n, int npoints, + at::Tensor grad_out_tensor, at::Tensor idx_tensor, at::Tensor grad_points_tensor); + +void gather_points_grad_kernel_launcher_fast(int b, int c, int n, int npoints, + const float *grad_out, const int *idx, float *grad_points); + + +int furthest_point_sampling_wrapper(int b, int n, int m, + at::Tensor points_tensor, at::Tensor temp_tensor, at::Tensor idx_tensor); + +void furthest_point_sampling_kernel_launcher(int b, int n, int m, + const float *dataset, float *temp, int *idxs); + +#endif diff --git a/models/LEO/model/pointnext/layers.py b/models/LEO/model/pointnext/layers.py new file mode 100644 index 0000000..9a0e432 --- /dev/null +++ b/models/LEO/model/pointnext/layers.py @@ -0,0 +1,722 @@ +import copy +from typing import Tuple + +import torch +import torch.nn as nn +import torch.nn.functional as F +from accelerate.logging import get_logger +from easydict import EasyDict as edict +from model.pointnext.cpp import pointnet2_cuda +from omegaconf import OmegaConf +from torch.autograd import Function + +logger = get_logger(__name__) + +CHANNEL_MAP = { + 'fj': lambda x: x, + 'df': lambda x: x, + 'assa': lambda x: x * 3, + 'assa_dp': lambda x: x * 3 + 3, + 'dp_fj': lambda x: 3 + x, + 'pj': lambda x: x, + 'dp': lambda x: 3, + 'pi_dp': lambda x: x + 3, + 'pj_dp': lambda x: x + 3, + 'dp_fj_df': lambda x: x * 2 + 3, + 'dp_fi_df': lambda x: x * 2 + 3, + 'pi_dp_fj_df': lambda x: x * 2 + 6, + 'pj_dp_fj_df': lambda x: x * 2 + 6, + 'pj_dp_df': lambda x: x + 6, + 'dp_df': lambda x: x + 3, +} + +# activation + +_ACT_LAYER = dict( + silu=nn.SiLU, + swish=nn.SiLU, + mish=nn.Mish, + relu=nn.ReLU, + relu6=nn.ReLU6, + leaky_relu=nn.LeakyReLU, + leakyrelu=nn.LeakyReLU, + elu=nn.ELU, + prelu=nn.PReLU, + celu=nn.CELU, + selu=nn.SELU, + gelu=nn.GELU, + sigmoid=nn.Sigmoid, + tanh=nn.Tanh, + hard_sigmoid=nn.Hardsigmoid, + hard_swish=nn.Hardswish, +) + + +def create_act(act_args): + """Build activation layer. + + Returns: + nn.Module: Created activation layer. 
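+ + Example (illustrative): + >>> act = create_act('relu') # -> nn.ReLU(inplace=True) + >>> act = create_act({'act': 'leakyrelu', 'negative_slope': 0.2})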
+ """ + if act_args is None: + return None + act_args = copy.deepcopy(act_args) + + if isinstance(act_args, str): + act_args = {'act': act_args} + + act = act_args.pop('act', None) + if act is None: + return None + + if isinstance(act, str): + act = act.lower() + assert act in _ACT_LAYER.keys(), f'input {act} is not supported' + act_layer = _ACT_LAYER[act] + + inplace = act_args.pop('inplace', True) + + if act not in ['gelu', 'sigmoid']: # TODO: add others + return act_layer(inplace=inplace, **act_args) + else: + return act_layer(**act_args) + + +# norm + + +class LayerNorm1d(nn.LayerNorm): + """LayerNorm for channels of '1D' spatial BCN tensors.""" + + def __init__(self, num_channels, **kwargs): + super().__init__(num_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return F.layer_norm(x.permute(0, 2, + 1), self.normalized_shape, self.weight, + self.bias, self.eps).permute(0, 2, 1).contiguous() + + +class LayerNorm2d(nn.LayerNorm): + """LayerNorm for channels of '2D' spatial BCHW tensors.""" + + def __init__(self, num_channels, **kwargs): + super().__init__(num_channels) + + def forward(self, x: torch.Tensor) -> torch.Tensor: + return F.layer_norm(x.permute(0, 2, 3, 1), self.normalized_shape, + self.weight, self.bias, + self.eps).permute(0, 3, 2, 1).contiguous() + + +class FastBatchNorm1d(nn.Module): + """Fast BachNorm1d for input with shape [B, N, C], where the feature + dimension is at last. + + Borrowed from torch-points3d: https://github.com/torch- points3d/torch- + points3d + """ + + def __init__(self, num_features, **kwargs): + super().__init__() + self.bn = nn.BatchNorm1d(num_features, **kwargs) + + def _forward_dense(self, x): + return self.bn(x.transpose(1, 2)).transpose(2, 1) + + def _forward_sparse(self, x): + return self.bn(x) + + def forward(self, x): + if x.dim() == 2: + return self._forward_sparse(x) + elif x.dim() == 3: + return self._forward_dense(x) + else: + raise ValueError('Non supported number of dimensions {}'.format( + x.dim())) + + +_NORM_LAYER = dict( + bn1d=nn.BatchNorm1d, + bn2d=nn.BatchNorm2d, + bn=nn.BatchNorm2d, + in2d=nn.InstanceNorm2d, + in1d=nn.InstanceNorm1d, + gn=nn.GroupNorm, + syncbn=nn.SyncBatchNorm, + ln=nn.LayerNorm, # for tokens + ln1d=LayerNorm1d, # for point cloud + ln2d=LayerNorm2d, # for point cloud + fastbn1d=FastBatchNorm1d, + fastbn2d=FastBatchNorm1d, + fastbn=FastBatchNorm1d, +) + + +def create_norm(norm_args, channels, dimension=None): + """Build normalization layer. + + Returns: + nn.Module: Created normalization layer. 
+ """ + if norm_args is None: + return None + if isinstance(norm_args, dict): + norm_args = edict(copy.deepcopy(norm_args)) + norm = norm_args.pop('norm', None) + else: + norm = norm_args + norm_args = edict() + if norm is None: + return None + if isinstance(norm, str): + norm = norm.lower() + if dimension is not None: + dimension = str(dimension).lower() + if dimension not in norm: + norm += dimension + assert norm in _NORM_LAYER.keys(), f'input {norm} is not supported' + norm = _NORM_LAYER[norm] + return norm(channels, **norm_args) + + +# conv + + +class Conv1d(nn.Conv1d): + + def __init__(self, *args, **kwargs): + if len(args) == 2 and 'kernel_size' not in kwargs.keys(): + super(Conv1d, self).__init__(*args, 1, **kwargs) + else: + super(Conv1d, self).__init__(*args, **kwargs) + + +class Conv2d(nn.Conv2d): + + def __init__(self, *args, **kwargs): + if len(args) == 2 and 'kernel_size' not in kwargs.keys(): + super(Conv2d, self).__init__(*args, (1, 1), **kwargs) + else: + super(Conv2d, self).__init__(*args, **kwargs) + + +def create_convblock1d(*args, + norm_args=None, + act_args=None, + order='conv-norm-act', + **kwargs): + out_channels = args[1] + in_channels = args[0] + bias = kwargs.pop('bias', True) + if order == 'conv-norm-act': + norm_layer = create_norm(norm_args, out_channels, dimension='1d') + bias = False if norm_layer is not None else bias + conv_layer = [Conv1d(*args, bias=bias, **kwargs)] + if norm_layer is not None: + conv_layer.append(norm_layer) + act_layer = create_act(act_args) + if act_args is not None: + conv_layer.append(act_layer) + + elif order == 'norm-act-conv': + conv_layer = [] + norm_layer = create_norm(norm_args, in_channels, dimension='1d') + bias = False if norm_layer is not None else bias + if norm_layer is not None: + conv_layer.append(norm_layer) + act_layer = create_act(act_args) + if act_args is not None: + conv_layer.append(act_layer) + conv_layer.append(Conv1d(*args, bias=bias, **kwargs)) + + elif order == 'conv-act-norm': + norm_layer = create_norm(norm_args, out_channels, dimension='1d') + bias = False if norm_layer is not None else bias + conv_layer = [Conv1d(*args, bias=bias, **kwargs)] + act_layer = create_act(act_args) + if act_args is not None: + conv_layer.append(act_layer) + if norm_layer is not None: + conv_layer.append(norm_layer) + else: + raise NotImplementedError(f'{order} is not supported') + + return nn.Sequential(*conv_layer) + + +def create_convblock2d(*args, + norm_args=None, + act_args=None, + order='conv-norm-act', + **kwargs): + in_channels = args[0] + out_channels = args[1] + bias = kwargs.pop('bias', True) + if order == 'conv-norm-act': + norm_layer = create_norm(norm_args, out_channels, dimension='2d') + bias = False if norm_layer is not None else bias + conv_layer = [Conv2d(*args, bias=bias, **kwargs)] + if norm_layer is not None: + conv_layer.append(norm_layer) + act_layer = create_act(act_args) + if act_args is not None: + conv_layer.append(act_layer) + + elif order == 'norm-act-conv': + conv_layer = [] + norm_layer = create_norm(norm_args, in_channels, dimension='2d') + bias = False if norm_layer is not None else bias + if norm_layer is not None: + conv_layer.append(norm_layer) + act_layer = create_act(act_args) + if act_args is not None: + conv_layer.append(act_layer) + conv_layer.append(Conv2d(*args, bias=bias, **kwargs)) + + elif order == 'conv-act-norm': + norm_layer = create_norm(norm_args, out_channels, dimension='2d') + bias = False if norm_layer is not None else bias + conv_layer = [Conv2d(*args, bias=bias, 
**kwargs)] + act_layer = create_act(act_args) + if act_args is not None: + conv_layer.append(act_layer) + if norm_layer is not None: + conv_layer.append(norm_layer) + else: + raise NotImplementedError(f'{order} is not supported') + + return nn.Sequential(*conv_layer) + + +# group + + +class KNN(nn.Module): + + def __init__(self, neighbors, transpose_mode=True): + super(KNN, self).__init__() + self.neighbors = neighbors + + @torch.no_grad() + def forward(self, support, query): + """ + Args: + support ([tensor]): [B, N, C] + query ([tensor]): [B, M, C] + Returns: + [int]: neighbor idx. [B, M, K] + """ + dist = torch.cdist(support, query) + k_dist = dist.topk(k=self.neighbors, dim=1, largest=False) + return k_dist.values, k_dist.indices.transpose(1, 2).contiguous().int() + + +class GroupingOperation(Function): + + @staticmethod + @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32) + def forward(ctx, features: torch.Tensor, + idx: torch.Tensor) -> torch.Tensor: + """ + :param ctx: + :param features: (B, C, N) tensor of features to group + :param idx: (B, npoint, nsample) tensor containing the indicies of features to group with + :return: + output: (B, C, npoint, nsample) tensor + """ + assert features.is_contiguous() + assert idx.is_contiguous() + + B, nfeatures, nsample = idx.size() + _, C, N = features.size() + output = torch.cuda.FloatTensor(B, + C, + nfeatures, + nsample, + device=features.device) + + pointnet2_cuda.group_points_wrapper(B, C, N, nfeatures, nsample, + features, idx, output) + + ctx.for_backwards = (idx, N) + return output + + @staticmethod + def backward(ctx, + grad_out: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """ + :param ctx: + :param grad_out: (B, C, npoint, nsample) tensor of the gradients of the output from forward + :return: + grad_features: (B, C, N) gradient of the features + """ + idx, N = ctx.for_backwards + + B, C, npoint, nsample = grad_out.size() + grad_features = torch.zeros([B, C, N], + dtype=torch.float, + device=grad_out.device, + requires_grad=True) + grad_out_data = grad_out.data.contiguous() + pointnet2_cuda.group_points_grad_wrapper(B, C, N, npoint, nsample, + grad_out_data, idx, + grad_features.data) + return grad_features, None + + +grouping_operation = GroupingOperation.apply + + +class BallQuery(Function): + + @staticmethod + def forward(ctx, radius: float, nsample: int, xyz: torch.Tensor, + new_xyz: torch.Tensor) -> torch.Tensor: + """ + :param ctx: + :param radius: float, radius of the balls + :param nsample: int, maximum number of features in the balls + :param xyz: (B, N, 3) xyz coordinates of the features + :param new_xyz: (B, npoint, 3) centers of the ball query + :return: + idx: (B, npoint, nsample) tensor with the indicies of the features that form the query balls + """ + assert new_xyz.is_contiguous() + assert xyz.is_contiguous() + + B, N, _ = xyz.size() + npoint = new_xyz.size(1) + idx = torch.cuda.IntTensor(B, npoint, nsample, + device=xyz.device).zero_() + pointnet2_cuda.ball_query_wrapper(B, N, npoint, radius, nsample, + new_xyz, xyz, idx) + return idx + + @staticmethod + def backward(ctx, a=None): + return None, None, None, None + + +ball_query = BallQuery.apply + + +class QueryAndGroup(nn.Module): + + def __init__(self, + radius: float, + nsample: int, + relative_xyz=True, + normalize_dp=False, + normalize_by_std=False, + normalize_by_allstd=False, + normalize_by_allstd2=False, + return_only_idx=False, + **kwargs): + """[summary] + + Args: + radius (float): radius of ball + nsample (int): maximum number of features to 
gather in the ball + use_xyz (bool, optional): concatenate xyz. Defaults to True. + ret_grouped_xyz (bool, optional): [description]. Defaults to False. + normalize_dp (bool, optional): divide grouped relative coordinates by the query radius. Defaults to False. + """ + super().__init__() + self.radius, self.nsample = radius, nsample + self.normalize_dp = normalize_dp + self.normalize_by_std = normalize_by_std + self.normalize_by_allstd = normalize_by_allstd + self.normalize_by_allstd2 = normalize_by_allstd2 + assert self.normalize_dp + self.normalize_by_std + self.normalize_by_allstd < 2 # normalize by at most one method + self.relative_xyz = relative_xyz + self.return_only_idx = return_only_idx + + def forward(self, + query_xyz: torch.Tensor, + support_xyz: torch.Tensor, + features: torch.Tensor = None) -> Tuple[torch.Tensor]: + """ + :param query_xyz: (B, npoint, 3) centers of the ball query + :param support_xyz: (B, N, 3) xyz coordinates of the support features + :param features: (B, C, N) descriptors of the features + :return: + new_features: (B, 3 + C, npoint, nsample) + """ + idx = ball_query(self.radius, self.nsample, support_xyz, query_xyz) + + if self.return_only_idx: + return idx + xyz_trans = support_xyz.transpose(1, 2).contiguous() + grouped_xyz = grouping_operation(xyz_trans, + idx) # (B, 3, npoint, nsample) + if self.relative_xyz: + grouped_xyz = grouped_xyz - query_xyz.transpose(1, 2).unsqueeze( + -1) # relative position + if self.normalize_dp: + grouped_xyz /= self.radius + grouped_features = grouping_operation( + features, idx) if features is not None else None + return grouped_xyz, grouped_features + + +class GroupAll(nn.Module): + + def __init__(self, ): + super().__init__() + + def forward(self, + new_xyz: torch.Tensor, + xyz: torch.Tensor, + features: torch.Tensor = None): + """ + :param xyz: (B, N, 3) xyz coordinates of the features + :param new_xyz: ignored + :param features: (B, C, N) descriptors of the features + :return: + new_features: (B, C + 3, 1, N) + """ + grouped_xyz = xyz.transpose(1, 2).unsqueeze(2) + grouped_features = features.unsqueeze( + 2) if features is not None else None + return grouped_xyz, grouped_features + + +class KNNGroup(nn.Module): + + def __init__(self, + nsample: int, + relative_xyz=True, + normalize_dp=False, + return_only_idx=False, + **kwargs): + """[summary] + + Args: + nsample (int): maximum number of neighbours to gather + use_xyz (bool, optional): concatenate xyz. Defaults to True. + ret_grouped_xyz (bool, optional): [description]. Defaults to False. + normalize_dp (bool, optional): divide grouped relative coordinates by the per-batch maximum neighbour distance. Defaults to False.
+ """ + super().__init__() + self.nsample = nsample + self.knn = KNN(nsample, transpose_mode=True) + self.relative_xyz = relative_xyz + self.normalize_dp = normalize_dp + self.return_only_idx = return_only_idx + + def forward(self, + query_xyz: torch.Tensor, + support_xyz: torch.Tensor, + features: torch.Tensor = None) -> Tuple[torch.Tensor]: + """ + :param query_xyz: (B, N, 3) xyz coordinates of the features + :param support_xyz: (B, npoint, 3) centroids + :param features: (B, C, N) descriptors of the features + :return: + new_features: (B, 3 + C, npoint, nsample) + """ + _, idx = self.knn(support_xyz, query_xyz) + if self.return_only_idx: + return idx + idx = idx.int() + xyz_trans = support_xyz.transpose(1, 2).contiguous() + grouped_xyz = grouping_operation(xyz_trans, + idx) # (B, 3, npoint, nsample) + if self.relative_xyz: + grouped_xyz -= query_xyz.transpose(1, 2).unsqueeze( + -1) # relative position + if self.normalize_dp: + grouped_xyz /= torch.amax(torch.sqrt( + torch.sum(grouped_xyz**2, dim=1)), + dim=(1, 2)).view(-1, 1, 1, 1) + if features is not None: + grouped_features = grouping_operation(features, idx) + return grouped_xyz, grouped_features + else: + return grouped_xyz, None + + +def create_grouper(group_args): + # group_args_copy = copy.deepcopy(group_args) + group_args_copy = copy.deepcopy(OmegaConf.to_object(group_args)) + method = group_args_copy.pop('NAME', 'ballquery') + radius = group_args_copy.pop('radius', 0.1) + nsample = group_args_copy.pop('nsample', 20) + + logger.debug(group_args) + if nsample is not None: + if method == 'ballquery': + grouper = QueryAndGroup(radius, nsample, **group_args_copy) + elif method == 'knn': + grouper = KNNGroup(nsample, **group_args_copy) + else: + grouper = GroupAll() + return grouper + + +def get_aggregation_feautres(p, dp, f, fj, feature_type='dp_fj'): + if feature_type == 'dp_fj': + fj = torch.cat([dp, fj], 1) + elif feature_type == 'dp_fj_df': + df = fj - f.unsqueeze(-1) + fj = torch.cat([dp, fj, df], 1) + elif feature_type == 'pi_dp_fj_df': + df = fj - f.unsqueeze(-1) + fj = torch.cat([ + p.transpose(1, 2).unsqueeze(-1).expand(-1, -1, -1, df.shape[-1]), + dp, fj, df + ], 1) + elif feature_type == 'dp_df': + df = fj - f.unsqueeze(-1) + fj = torch.cat([dp, df], 1) + return fj + + +# subsample + + +def random_sample(xyz, npoint): + B, N, _ = xyz.shape + idx = torch.randint(0, N, (B, npoint), device=xyz.device) + return idx + + +class FurthestPointSampling(Function): + + @staticmethod + def forward(ctx, xyz: torch.Tensor, npoint: int) -> torch.Tensor: + """Uses iterative furthest point sampling to select a set of npoint + features that have the largest minimum distance. 
+ + :param ctx: + :param xyz: (B, N, 3) where N > npoint + :param npoint: int, number of features in the sampled set + :return: + output: (B, npoint) tensor containing the set (idx) + """ + assert xyz.is_contiguous() + + B, N, _ = xyz.size() + # output = torch.cuda.IntTensor(B, npoint, device=xyz.device) + # temp = torch.cuda.FloatTensor(B, N, device=xyz.device).fill_(1e10) + output = torch.cuda.IntTensor(B, npoint) + temp = torch.cuda.FloatTensor(B, N).fill_(1e10) + + pointnet2_cuda.furthest_point_sampling_wrapper(B, N, npoint, xyz, temp, + output) + return output + + @staticmethod + def backward(xyz, a=None): + return None, None + + +furthest_point_sample = FurthestPointSampling.apply + +# upsample + + +class ThreeNN(Function): + + @staticmethod + def forward(ctx, unknown: torch.Tensor, + known: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]: + """Find the three nearest neighbors of unknown in known. + + :param ctx: + :param unknown: (B, N, 3) + :param known: (B, M, 3) + :return: + dist: (B, N, 3) l2 distance to the three nearest neighbors + idx: (B, N, 3) index of 3 nearest neighbors + """ + assert unknown.is_contiguous() + assert known.is_contiguous() + + B, N, _ = unknown.size() + m = known.size(1) + dist2 = torch.cuda.FloatTensor(B, N, 3) + idx = torch.cuda.IntTensor(B, N, 3) + + pointnet2_cuda.three_nn_wrapper(B, N, m, unknown, known, dist2, idx) + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply + + +class ThreeInterpolate(Function): + + @staticmethod + @torch.cuda.amp.custom_fwd(cast_inputs=torch.float32) + def forward(ctx, features: torch.Tensor, idx: torch.Tensor, + weight: torch.Tensor) -> torch.Tensor: + """Performs weight linear interpolation on 3 features. 
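+
+        Each output feature is a convex combination of its three nearest
+        known features:
+        output[b, c, i] = sum_k weight[b, i, k] * features[b, c, idx[b, i, k]]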
+ + :param ctx: + :param features: (B, C, M) Features descriptors to be interpolated from + :param idx: (B, n, 3) three nearest neighbors of the target features in features + :param weight: (B, n, 3) weights + :return: + output: (B, C, N) tensor of the interpolated features + """ + assert features.is_contiguous() + assert idx.is_contiguous() + assert weight.is_contiguous() + + B, c, m = features.size() + n = idx.size(1) + ctx.three_interpolate_for_backward = (idx, weight, m) + output = torch.cuda.FloatTensor(B, c, n) + + pointnet2_cuda.three_interpolate_wrapper(B, c, m, n, features, idx, + weight, output) + return output + + @staticmethod + def backward( + ctx, grad_out: torch.Tensor + ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]: + """ + :param ctx: + :param grad_out: (B, C, N) tensor with gradients of outputs + :return: + grad_features: (B, C, M) tensor with gradients of features + None: + None: + """ + idx, weight, m = ctx.three_interpolate_for_backward + B, c, n = grad_out.size() + + grad_features = torch.zeros([B, c, m], + device='cuda', + requires_grad=True) + grad_out_data = grad_out.data.contiguous() + + pointnet2_cuda.three_interpolate_grad_wrapper(B, c, n, m, + grad_out_data, idx, + weight, + grad_features.data) + return grad_features, None, None + + +three_interpolate = ThreeInterpolate.apply + + +def three_interpolation(unknown_xyz, known_xyz, know_feat): + """ + input: known_xyz: (m, 3), unknown_xyz: (n, 3), feat: (m, c), offset: (b), new_offset: (b) + output: (n, c) + """ + dist, idx = three_nn(unknown_xyz, known_xyz) + dist_recip = 1.0 / (dist + 1e-8) + norm = torch.sum(dist_recip, dim=2, keepdim=True) + weight = dist_recip / norm + interpolated_feats = three_interpolate(know_feat, idx, weight) + return interpolated_feats diff --git a/models/LEO/model/pointnext/pointnext.py b/models/LEO/model/pointnext/pointnext.py new file mode 100644 index 0000000..d89ad96 --- /dev/null +++ b/models/LEO/model/pointnext/pointnext.py @@ -0,0 +1,556 @@ +"""Adapted from PointNeXt implementation in repo: + +https://github.com/guochengqian/openpoints. 
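+
+The encoder stacks a stem SetAbstraction and several stages of strided
+SetAbstraction downsampling followed by InvResMLP (or ResBlock) blocks,
+growing the width and query radius per stage. A minimal usage sketch
+(illustrative only; shapes assumed, default config):
+
+    model = PointNext(in_channels=3)
+    feat = model(x)  # x: (B*O, N, 3 + in_channels) -> feat: (B*O, out_dim)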
+""" +from typing import List + +import torch +import torch.nn as nn +from accelerate.logging import get_logger +from einops import rearrange +from model.pointnext.layers import (CHANNEL_MAP, create_act, + create_convblock1d, create_convblock2d, + create_grouper, furthest_point_sample, + get_aggregation_feautres, random_sample, + three_interpolation) + +logger = get_logger(__name__) + + +def get_reduction_fn(reduction): + reduction = 'mean' if reduction.lower() == 'avg' else reduction + assert reduction in ['sum', 'max', 'mean'] + if reduction == 'max': + pool = lambda x: torch.max(x, dim=-1, keepdim=False)[0] + elif reduction == 'mean': + pool = lambda x: torch.mean(x, dim=-1, keepdim=False) + elif reduction == 'sum': + pool = lambda x: torch.sum(x, dim=-1, keepdim=False) + return pool + + +class LocalAggregation(nn.Module): + """Local aggregation layer for a set Set abstraction layer abstracts + features from a larger set to a smaller set Local aggregation layer + aggregates features from the same set.""" + + def __init__(self, + channels: List[int], + norm_args={'norm': 'bn1d'}, + act_args={'act': 'relu'}, + group_args={ + 'NAME': 'ballquery', + 'radius': 0.1, + 'nsample': 16 + }, + conv_args=None, + feature_type='dp_fj', + reduction='max', + last_act=True, + **kwargs): + super().__init__() + if kwargs: + logger.warning( + f'kwargs: {kwargs} are not used in {__class__.__name__}') + channels[0] = CHANNEL_MAP[feature_type](channels[0]) + convs = [] + for i in range(len(channels) - 1): # number of layers in each block + convs.append( + create_convblock2d(channels[i], + channels[i + 1], + norm_args=norm_args, + act_args=None if i == (len(channels) - 2) + and not last_act else act_args, + **conv_args)) + self.convs = nn.Sequential(*convs) + self.grouper = create_grouper(group_args) + self.reduction = reduction.lower() + self.pool = get_reduction_fn(self.reduction) + self.feature_type = feature_type + + def forward(self, pf) -> torch.Tensor: + # p: position, f: feature + p, f = pf + # neighborhood_features + dp, fj = self.grouper(p, p, f) + fj = get_aggregation_feautres(p, dp, f, fj, self.feature_type) + f = self.pool(self.convs(fj)) + """ DEBUG neighbor numbers. 
+ if f.shape[-1] != 1: + query_xyz, support_xyz = p, p + radius = self.grouper.radius + dist = torch.cdist(query_xyz.cpu(), support_xyz.cpu()) + points = len(dist[dist < radius]) / (dist.shape[0] * dist.shape[1]) + logger.info(f'query size: {query_xyz.shape}, support size: {support_xyz.shape}, radius: {radius}, num_neighbors: {points}') + DEBUG end """ + return f + + +class SetAbstraction(nn.Module): + """The modified set abstraction module in PointNet++ with residual + connection support.""" + + def __init__( + self, + in_channels, + out_channels, + layers=1, + stride=1, + group_args={ + 'NAME': 'ballquery', + 'radius': 0.1, + 'nsample': 16 + }, + norm_args={'norm': 'bn1d'}, + act_args={'act': 'relu'}, + conv_args=None, + sampler='fps', + feature_type='dp_fj', + use_res=False, + is_head=False, + **kwargs, + ): + super().__init__() + self.stride = stride + self.is_head = is_head + self.all_aggr = not is_head and stride == 1 + self.use_res = use_res and not self.all_aggr and not self.is_head + self.feature_type = feature_type + + mid_channel = out_channels // 2 if stride > 1 else out_channels + channels = [in_channels] + [mid_channel] * \ + (layers - 1) + [out_channels] + channels[0] = in_channels if is_head else CHANNEL_MAP[feature_type]( + channels[0]) + + if self.use_res: + self.skipconv = create_convblock1d( + in_channels, channels[-1], norm_args=None, act_args=None + ) if in_channels != channels[-1] else nn.Identity() + self.act = create_act(act_args) + + # actually, one can use local aggregation layer to replace the following + create_conv = create_convblock1d if is_head else create_convblock2d + convs = [] + for i in range(len(channels) - 1): + convs.append( + create_conv(channels[i], + channels[i + 1], + norm_args=norm_args if not is_head else None, + act_args=None if i == len(channels) - 2 and + (self.use_res or is_head) else act_args, + **conv_args)) + self.convs = nn.Sequential(*convs) + if not is_head: + if self.all_aggr: + group_args.nsample = None + group_args.radius = None + self.grouper = create_grouper(group_args) + self.pool = lambda x: torch.max(x, dim=-1, keepdim=False)[0] + if sampler.lower() == 'fps': + self.sample_fn = furthest_point_sample + elif sampler.lower() == 'random': + self.sample_fn = random_sample + + def forward(self, pf): + p, f = pf + if self.is_head: + f = self.convs(f) # (n, c) + else: + if not self.all_aggr: + idx = self.sample_fn(p, p.shape[1] // self.stride).long() + new_p = torch.gather(p, 1, idx.unsqueeze(-1).expand(-1, -1, 3)) + else: + new_p = p + """ DEBUG neighbor numbers. 
+ query_xyz, support_xyz = new_p, p + radius = self.grouper.radius + dist = torch.cdist(query_xyz.cpu(), support_xyz.cpu()) + points = len(dist[dist < radius]) / (dist.shape[0] * dist.shape[1]) + logger.info(f'query size: {query_xyz.shape}, support size: {support_xyz.shape}, radius: {radius}, num_neighbors: {points}') + DEBUG end """ + if self.use_res or 'df' in self.feature_type: + fi = torch.gather(f, -1, + idx.unsqueeze(1).expand(-1, f.shape[1], -1)) + if self.use_res: + identity = self.skipconv(fi) + else: + fi = None + dp, fj = self.grouper(new_p, p, f) + fj = get_aggregation_feautres(new_p, + dp, + fi, + fj, + feature_type=self.feature_type) + f = self.pool(self.convs(fj)) + if self.use_res: + f = self.act(f + identity) + p = new_p + return p, f + + +class FeaturePropogation(nn.Module): + """The Feature Propogation module in PointNet++""" + + def __init__(self, + mlp, + upsample=True, + norm_args={'norm': 'bn1d'}, + act_args={'act': 'relu'}): + """ + Args: + mlp: [current_channels, next_channels, next_channels] + out_channels: + norm_args: + act_args: + """ + super().__init__() + if not upsample: + self.linear2 = nn.Sequential(nn.Linear(mlp[0], mlp[1]), + nn.ReLU(inplace=True)) + mlp[1] *= 2 + linear1 = [] + for i in range(1, len(mlp) - 1): + linear1.append( + create_convblock1d(mlp[i], + mlp[i + 1], + norm_args=norm_args, + act_args=act_args)) + self.linear1 = nn.Sequential(*linear1) + else: + convs = [] + for i in range(len(mlp) - 1): + convs.append( + create_convblock1d(mlp[i], + mlp[i + 1], + norm_args=norm_args, + act_args=act_args)) + self.convs = nn.Sequential(*convs) + + self.pool = lambda x: torch.mean(x, dim=-1, keepdim=False) + + def forward(self, pf1, pf2=None): + # pfb1 is with the same size of upsampled points + if pf2 is None: + _, f = pf1 # (B, N, 3), (B, C, N) + f_global = self.pool(f) + f = torch.cat((f, self.linear2(f_global).unsqueeze(-1).expand( + -1, -1, f.shape[-1])), + dim=1) + f = self.linear1(f) + else: + p1, f1 = pf1 + p2, f2 = pf2 + if f1 is not None: + f = self.convs( + torch.cat((f1, three_interpolation(p1, p2, f2)), dim=1)) + else: + f = self.convs(three_interpolation(p1, p2, f2)) + return f + + +class InvResMLP(nn.Module): + + def __init__(self, + in_channels, + norm_args=None, + act_args=None, + aggr_args={ + 'feature_type': 'dp_fj', + 'reduction': 'max' + }, + group_args={'NAME': 'ballquery'}, + conv_args=None, + expansion=1, + use_res=True, + num_posconvs=2, + less_act=False, + **kwargs): + super().__init__() + self.use_res = use_res + mid_channels = int(in_channels * expansion) + self.convs = LocalAggregation( + [in_channels, in_channels], + norm_args=norm_args, + act_args=act_args if num_posconvs > 0 else None, + group_args=group_args, + conv_args=conv_args, + **aggr_args, + **kwargs) + if num_posconvs < 1: + channels = [] + elif num_posconvs == 1: + channels = [in_channels, in_channels] + else: + channels = [in_channels, mid_channels, in_channels] + pwconv = [] + # point wise after depth wise conv (without last layer) + for i in range(len(channels) - 1): + pwconv.append( + create_convblock1d( + channels[i], + channels[i + 1], + norm_args=norm_args, + act_args=act_args if + (i != len(channels) - 2) and not less_act else None, + **conv_args)) + self.pwconv = nn.Sequential(*pwconv) + self.act = create_act(act_args) + + def forward(self, pf): + p, f = pf + identity = f + f = self.convs([p, f]) + f = self.pwconv(f) + if f.shape[-1] == identity.shape[-1] and self.use_res: + f += identity + f = self.act(f) + return [p, f] + + +class ResBlock(nn.Module): 
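+    """Residual aggregation block: a stack of LocalAggregation layers with
+    an identity skip connection applied when input and output shapes match."""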
+ + def __init__(self, + in_channels, + norm_args=None, + act_args=None, + aggr_args={ + 'feature_type': 'dp_fj', + 'reduction': 'max' + }, + group_args={'NAME': 'ballquery'}, + conv_args=None, + expansion=1, + use_res=True, + **kwargs): + super().__init__() + self.use_res = use_res + mid_channels = in_channels * expansion + self.convs = LocalAggregation( + [in_channels, in_channels, mid_channels, in_channels], + norm_args=norm_args, + act_args=None, + group_args=group_args, + conv_args=conv_args, + **aggr_args, + **kwargs) + self.act = create_act(act_args) + + def forward(self, pf): + p, f = pf + identity = f + f = self.convs([p, f]) + if f.shape[-1] == identity.shape[-1] and self.use_res: + f += identity + f = self.act(f) + return [p, f] + + +class PointNext(nn.Module): + + def __init__( + self, + in_channels=3, + width=32, + num_blocks=[1, 4, 7, 4, 4], + strides=[1, 4, 4, 4, 4], + block='InvResMLP', + nsample=32, + radius=0.1, + conv_args=None, + aggr_args={ + 'feature_type': 'dp_fj', + 'reduction': 'max' + }, + group_args={ + 'NAME': 'ballquery', + 'radius': 0.1, + 'nsample': 32 + }, + norm_args={'norm': 'bn'}, + act_args={'act': 'relu'}, + sampler='fps', + expansion=4, + sa_layers=1, + sa_use_res=False, + use_res=True, + radius_scaling=2, + nsample_scaling=1, + ): + super().__init__() + self.in_channels = in_channels + block = eval(block) + self.num_blocks = num_blocks + self.strides = strides + + self.aggr_args = aggr_args + self.norm_args = norm_args + self.act_args = act_args + self.conv_args = conv_args + self.sampler = sampler + self.expansion = expansion + self.sa_layers = sa_layers + self.sa_use_res = sa_use_res + self.use_res = use_res + self.radius = radius + + self.radii = self._to_full_list(self.radius, radius_scaling) + self.nsample = self._to_full_list(nsample, nsample_scaling) + logger.debug( + f'PointNext: radius = {self.radii}, nsample = {self.nsample}') + + # double width after downsampling + channels = [] + for stride in self.strides: + if stride != 1: + width *= 2 + channels.append(width) + encoder = [] + for i in range(len(self.num_blocks)): + group_args.radius = self.radii[i] + group_args.nsample = self.nsample[i] + encoder.append( + self._make_enc(block, + channels[i], + self.num_blocks[i], + stride=self.strides[i], + group_args=group_args, + is_head=i == 0 and self.strides[i] == 1)) + self.encoder = nn.Sequential(*encoder) + self.out_dim = channels[-1] + self.channel_list = channels + self.pool = get_reduction_fn(self.aggr_args.reduction) + + @property + def device(self): + return list(self.parameters())[0].device + + def _to_full_list(self, param, param_scaling=1): + # param can be : radius, nsample + param_list = [] + if isinstance(param, list): + # make param a full list + for i, value in enumerate(param): + value = [value] if not isinstance(value, list) else value + if len(value) != self.num_blocks[i]: + value += [value[-1]] * (self.num_blocks[i] - len(value)) + param_list.append(value) + else: # radius is a scalar (in this case, only initial radius is provided), then create a list (radius for each block) + for i, stride in enumerate(self.strides): + if stride == 1: + param_list.append([param] * self.num_blocks[i]) + else: + param_list.append([param] + [param * param_scaling] * + (self.num_blocks[i] - 1)) + param *= param_scaling + return param_list + + def _make_enc(self, + block, + channels, + num_blocks, + stride, + group_args, + is_head=False): + layers = [] + radii = group_args.radius + nsample = group_args.nsample + group_args.radius = radii[0] + 
group_args.nsample = nsample[0] + layers.append( + SetAbstraction(self.in_channels, + channels, + self.sa_layers if not is_head else 1, + stride, + group_args=group_args, + sampler=self.sampler, + norm_args=self.norm_args, + act_args=self.act_args, + conv_args=self.conv_args, + is_head=is_head, + use_res=self.sa_use_res, + **self.aggr_args)) + self.in_channels = channels + for i in range(1, num_blocks): + group_args.radius = radii[i] + group_args.nsample = nsample[i] + layers.append( + block(self.in_channels, + aggr_args=self.aggr_args, + norm_args=self.norm_args, + act_args=self.act_args, + group_args=group_args, + conv_args=self.conv_args, + expansion=self.expansion, + use_res=self.use_res)) + return nn.Sequential(*layers) + + def forward_cls_feat(self, x): + p0 = x[..., :3].to(self.device) + f0 = x[..., 3:].to(self.device) + + if x.ndim == 4: + # (batch_size, num_objects, num_points, num_channels) + batch_size = x.shape[0] + p0 = rearrange(p0, 'b o p d -> (b o) p d').contiguous() + f0 = rearrange(f0, 'b o p c -> (b o) c p').contiguous() + + for i in range(len(self.encoder)): + p0, f0 = self.encoder[i]([p0, f0]) + f0 = self.pool(f0) + return rearrange(f0, '(b o) c -> b o c', b=batch_size) + + elif x.ndim == 3: + # (batch_size * num_objects, num_points, num_channels) + p0 = p0.contiguous() + f0 = rearrange(f0, 'bo p c -> bo c p').contiguous() + + for i in range(len(self.encoder)): + p0, f0 = self.encoder[i]([p0, f0]) + f0 = self.pool(f0) + return f0 + + else: + raise ValueError( + 'Point cloud input shape incorrect, ndim should be either 3 or 4.' + ) + + def forward_seg_feat(self, x): + p0 = x[..., :3].to(self.device) + f0 = x[..., 3:].to(self.device) + + if x.ndim == 4: + # (batch_size, num_objects, num_points, num_channels) + batch_size = x.shape[0] + p0 = rearrange(p0, 'b o p d -> (b o) p d').contiguous() + f0 = rearrange(f0, 'b o p c -> (b o) c p').contiguous() + + p, f = [p0], [f0] + for i in range(len(self.encoder)): + _p, _f = self.encoder[i]([p[-1], f[-1]]) + p.append(_p) + f.append(_f) + for i in range(len(p)): + p[i] = rearrange(p[i], '(b o) p d -> b o p d', b=batch_size) + f[i] = rearrange(f[i], '(b o) c p -> b o c p', b=batch_size) + return p, f + + elif x.ndim == 3: + # (batch_size * num_objects, num_points, num_channels) + p0 = p0.contiguous() + f0 = rearrange(f0, 'bo p c -> bo c p').contiguous() + + p, f = [p0], [f0] + for i in range(len(self.encoder)): + _p, _f = self.encoder[i]([p[-1], f[-1]]) + p.append(_p) + f.append(_f) + return p, f + + else: + raise ValueError( + 'Point cloud input shape incorrect, ndim should be either 3 or 4.' 
+ ) + + def forward(self, x): + return self.forward_cls_feat(x) diff --git a/models/LEO/model/transformers.py b/models/LEO/model/transformers.py new file mode 100755 index 0000000..d7e84c9 --- /dev/null +++ b/models/LEO/model/transformers.py @@ -0,0 +1,1270 @@ +import math +from typing import Optional, Tuple, Union + +import einops +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F +from accelerate.logging import get_logger +from model.utils import get_activation_fn +from torch import Tensor, nn +from transformers.activations import ACT2FN +from transformers.modeling_outputs import \ + BaseModelOutputWithPastAndCrossAttentions +from transformers.modeling_utils import (Conv1D, + find_pruneable_heads_and_indices, + prune_conv1d_layer) + +logger = get_logger(__name__) + + +class CrossAttentionLayer(nn.Module): + + def __init__(self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation='relu', + k_dim=None, + v_dim=None, + prenorm=True): + super().__init__() + if k_dim is None: + k_dim = d_model + if v_dim is None: + v_dim = d_model + self.prenorm = prenorm + self.multihead_attn = nn.MultiheadAttention(d_model, + nhead, + dropout=dropout, + batch_first=True, + kdim=k_dim, + vdim=v_dim) + # Implementation of Feedforward modules + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = get_activation_fn(activation) + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + ): + tgt2 = tgt + if self.prenorm: + tgt2 = self.norm1(tgt2) + tgt2, cross_attn_matrices = self.multihead_attn( + query=tgt2, + key=memory, + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask) + tgt = tgt + self.dropout2(tgt2) + if not self.prenorm: + tgt = self.norm1(tgt) + if self.prenorm: + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + if not self.prenorm: + tgt = self.norm3(tgt) + return tgt, cross_attn_matrices + + +class TransformerDecoderLayer(nn.Module): + + def __init__(self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation='relu'): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, + nhead, + dropout=dropout, + batch_first=True) + self.multihead_attn = nn.MultiheadAttention(d_model, + nhead, + dropout=dropout, + batch_first=True) + # Implementation of Feedforward modules + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.norm3 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + self.dropout3 = nn.Dropout(dropout) + + self.activation = get_activation_fn(activation) + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + ): + tgt2 = 
self.norm1(tgt) + tgt2, self_attn_matrices = self.self_attn( + query=tgt2, + key=tgt2, + value=tgt2, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask) + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2, cross_attn_matrices = self.multihead_attn( + query=tgt2, + key=memory, + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask) + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt, self_attn_matrices, cross_attn_matrices + + +class TransformerEncoderLayer(nn.Module): + + def __init__(self, + d_model, + nhead, + dim_feedforward=2048, + batch_first=True, + dropout=0.1, + activation='relu', + prenorm=False): + super().__init__() + self.self_attn = nn.MultiheadAttention(d_model, + nhead, + dropout=dropout, + batch_first=batch_first) + # Implementation of Feedforward modules + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.norm1 = nn.LayerNorm(d_model) + self.norm2 = nn.LayerNorm(d_model) + self.dropout1 = nn.Dropout(dropout) + self.dropout2 = nn.Dropout(dropout) + + self.activation = get_activation_fn(activation) + self.prenorm = prenorm + + def forward( + self, + tgt, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + ): + tgt2 = tgt + if self.prenorm: + tgt2 = self.norm1(tgt2) + tgt2, self_attn_matrices = self.self_attn( + query=tgt2, + key=tgt2, + value=tgt2, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask) + tgt = tgt + self.dropout1(tgt2) + if not self.prenorm: + tgt = self.norm1(tgt) + if self.prenorm: + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout2(tgt2) + if not self.prenorm: + tgt = self.norm2(tgt) + return tgt, self_attn_matrices + + +class MultiHeadAttentionSpatial(nn.Module): + + def __init__( + self, + d_model, + n_head, + dropout=0.1, + spatial_multihead=True, + spatial_dim=5, + spatial_attn_fusion='mul', + ): + super().__init__() + assert d_model % n_head == 0, 'd_model: %d, n_head: %d' % (d_model, + n_head) + + self.n_head = n_head + self.d_model = d_model + self.d_per_head = d_model // n_head + self.spatial_multihead = spatial_multihead + self.spatial_dim = spatial_dim + self.spatial_attn_fusion = spatial_attn_fusion + + self.w_qs = nn.Linear(d_model, d_model) + self.w_ks = nn.Linear(d_model, d_model) + self.w_vs = nn.Linear(d_model, d_model) + + self.fc = nn.Linear(d_model, d_model) + self.dropout = nn.Dropout(p=dropout) + self.layer_norm = nn.LayerNorm(d_model) + + self.spatial_n_head = n_head if spatial_multihead else 1 + if self.spatial_attn_fusion in ['mul', 'bias', 'add']: + self.pairwise_loc_fc = nn.Linear(spatial_dim, self.spatial_n_head) + elif self.spatial_attn_fusion == 'ctx': + self.pairwise_loc_fc = nn.Linear(spatial_dim, d_model) + elif self.spatial_attn_fusion == 'cond': + self.lang_cond_fc = nn.Linear( + d_model, self.spatial_n_head * (spatial_dim + 1)) + else: + raise NotImplementedError('unsupported spatial_attn_fusion %s' % + (self.spatial_attn_fusion)) + + def forward(self, + q, + k, + v, + pairwise_locs, + key_padding_mask=None, + txt_embeds=None): + residual = q + q = einops.rearrange(self.w_qs(q), + 'b l (head k) -> head b l k', + head=self.n_head) + k = einops.rearrange(self.w_ks(k), + 'b t (head k) -> head b t k', + head=self.n_head) + v = 
einops.rearrange(self.w_vs(v), + 'b t (head v) -> head b t v', + head=self.n_head) + attn = torch.einsum('hblk,hbtk->hblt', q, k) / np.sqrt(q.shape[-1]) + + if self.spatial_attn_fusion in ['mul', 'bias', 'add']: + loc_attn = self.pairwise_loc_fc(pairwise_locs) + loc_attn = einops.rearrange(loc_attn, 'b l t h -> h b l t') + if self.spatial_attn_fusion == 'mul': + loc_attn = F.relu(loc_attn) + if not self.spatial_multihead: + loc_attn = einops.repeat(loc_attn, + 'h b l t -> (h nh) b l t', + nh=self.n_head) + elif self.spatial_attn_fusion == 'ctx': + loc_attn = self.pairwise_loc_fc(pairwise_locs) + loc_attn = einops.rearrange(loc_attn, + 'b l t (h k) -> h b l t k', + h=self.n_head) + loc_attn = torch.einsum('hblk,hbltk->hblt', q, loc_attn) / np.sqrt( + q.shape[-1]) + elif self.spatial_attn_fusion == 'cond': + spatial_weights = self.lang_cond_fc(residual) + spatial_weights = einops.rearrange(spatial_weights, + 'b l (h d) -> h b l d', + h=self.spatial_n_head, + d=self.spatial_dim + 1) + if self.spatial_n_head == 1: + spatial_weights = einops.repeat(spatial_weights, + '1 b l d -> h b l d', + h=self.n_head) + spatial_bias = spatial_weights[..., :1] + spatial_weights = spatial_weights[..., 1:] + loc_attn = torch.einsum('hbld,bltd->hblt', spatial_weights, + pairwise_locs) + spatial_bias + loc_attn = torch.sigmoid(loc_attn) + + if key_padding_mask is not None: + mask = einops.repeat(key_padding_mask, + 'b t -> h b l t', + h=self.n_head, + l=q.size(2)) + attn = attn.masked_fill(mask, -np.inf) + if self.spatial_attn_fusion in ['mul', 'cond']: + loc_attn = loc_attn.masked_fill(mask, 0) + else: + loc_attn = loc_attn.masked_fill(mask, -np.inf) + + if self.spatial_attn_fusion == 'add': + fused_attn = (torch.softmax(attn, 3) + + torch.softmax(loc_attn, 3)) / 2 + else: + if self.spatial_attn_fusion in ['mul', 'cond']: + fused_attn = torch.log(torch.clamp(loc_attn, min=1e-6)) + attn + else: + fused_attn = loc_attn + attn + fused_attn = torch.softmax(fused_attn, 3) + + assert torch.sum(torch.isnan(fused_attn) == 0), print(fused_attn) + + output = torch.einsum('hblt,hbtv->hblv', fused_attn, v) + output = einops.rearrange(output, 'head b l v -> b l (head v)') + output = self.dropout(self.fc(output)) + output = self.layer_norm(output + residual) + return output, fused_attn + + +class TransformerSpatialDecoderLayer(TransformerDecoderLayer): + + def __init__(self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation='relu', + spatial_multihead=True, + spatial_dim=5, + spatial_attn_fusion='mul'): + super().__init__(d_model, + nhead, + dim_feedforward=dim_feedforward, + dropout=dropout, + activation=activation) + del self.self_attn + self.self_attn = MultiHeadAttentionSpatial( + d_model, + nhead, + dropout=dropout, + spatial_multihead=spatial_multihead, + spatial_dim=spatial_dim, + spatial_attn_fusion=spatial_attn_fusion, + ) + + def forward( + self, + tgt, + memory, + tgt_pairwise_locs: Optional[Tensor] = None, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + ): + tgt2 = self.norm1(tgt) + tgt2, self_attn_matrices = self.self_attn( + tgt2, + tgt2, + tgt2, + tgt_pairwise_locs, + key_padding_mask=tgt_key_padding_mask) + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2, cross_attn_matrices = self.multihead_attn( + query=tgt2, + key=memory, + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask) + tgt = tgt + self.dropout2(tgt2) 
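+        # feed-forward sub-layer with pre-norm and residual connection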
+ tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + return tgt, self_attn_matrices, cross_attn_matrices + + +class TransformerSpatialEncoderLayer(TransformerEncoderLayer): + + def __init__(self, + d_model, + nhead, + dim_feedforward=2048, + dropout=0.1, + activation='relu', + spatial_multihead=True, + spatial_dim=5, + spatial_attn_fusion='mul'): + super().__init__(d_model, + nhead, + dim_feedforward=dim_feedforward, + dropout=dropout, + activation=activation) + del self.self_attn + self.self_attn = MultiHeadAttentionSpatial( + d_model, + nhead, + dropout=dropout, + spatial_multihead=spatial_multihead, + spatial_dim=spatial_dim, + spatial_attn_fusion=spatial_attn_fusion, + ) + + def forward( + self, + tgt, + tgt_pairwise_locs, + tgt_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + ): + tgt2 = tgt + tgt2, self_attn_matrices = self.self_attn( + tgt2, + tgt2, + tgt2, + tgt_pairwise_locs, + key_padding_mask=tgt_key_padding_mask) + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + return tgt, self_attn_matrices + + +# Copied from transformers.models.llama.modeling_llama.LlamaRotaryEmbedding +class LlamaRotaryEmbedding(torch.nn.Module): + + def __init__(self, + dim, + max_position_embeddings=2048, + base=10000, + device=None): + super().__init__() + + self.dim = dim + self.max_position_embeddings = max_position_embeddings + self.base = base + inv_freq = 1.0 / (self.base**( + torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer('inv_freq', inv_freq, persistent=False) + + # Build here to make `torch.jit.trace` work. + self._set_cos_sin_cache(seq_len=max_position_embeddings, + device=self.inv_freq.device, + dtype=torch.get_default_dtype()) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, + device=device, + dtype=self.inv_freq.dtype) + + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer('cos_cached', + emb.cos()[None, None, :, :].to(dtype), + persistent=False) + self.register_buffer('sin_cached', + emb.sin()[None, None, :, :].to(dtype), + persistent=False) + + def forward(self, x, seq_len=None): + # x: [bs, num_attention_heads, seq_len, head_size] + if seq_len > self.max_seq_len_cached: + self._set_cos_sin_cache(seq_len=seq_len, + device=x.device, + dtype=x.dtype) + + return ( + self.cos_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + self.sin_cached[:, :, :seq_len, ...].to(dtype=x.dtype), + ) + + +# Copied from transformers.models.llama.modeling_llama.LlamaLinearScalingRotaryEmbedding +class LlamaLinearScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with linear scaling. 
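+
+    Position indices are divided by ``scaling_factor`` before the rotary
+    frequencies are computed, linearly stretching the usable context length.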
+ + Credits to the Reddit user /u/kaiokendev + """ + + def __init__(self, + dim, + max_position_embeddings=2048, + base=10000, + device=None, + scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + t = torch.arange(self.max_seq_len_cached, + device=device, + dtype=self.inv_freq.dtype) + t = t / self.scaling_factor + + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer('cos_cached', + emb.cos()[None, None, :, :].to(dtype), + persistent=False) + self.register_buffer('sin_cached', + emb.sin()[None, None, :, :].to(dtype), + persistent=False) + + +# Copied from transformers.models.llama.modeling_llama.LlamaDynamicNTKScalingRotaryEmbedding +class LlamaDynamicNTKScalingRotaryEmbedding(LlamaRotaryEmbedding): + """LlamaRotaryEmbedding extended with Dynamic NTK scaling. + + Credits to the Reddit users /u/bloc97 and /u/emozilla + """ + + def __init__(self, + dim, + max_position_embeddings=2048, + base=10000, + device=None, + scaling_factor=1.0): + self.scaling_factor = scaling_factor + super().__init__(dim, max_position_embeddings, base, device) + + def _set_cos_sin_cache(self, seq_len, device, dtype): + self.max_seq_len_cached = seq_len + + if seq_len > self.max_position_embeddings: + base = self.base * ((self.scaling_factor * seq_len / + self.max_position_embeddings) - + (self.scaling_factor - 1))**(self.dim / + (self.dim - 2)) + inv_freq = 1.0 / (base**( + torch.arange(0, self.dim, 2).float().to(device) / self.dim)) + self.register_buffer('inv_freq', inv_freq, persistent=False) + + t = torch.arange(self.max_seq_len_cached, + device=device, + dtype=self.inv_freq.dtype) + + freqs = torch.einsum('i,j->ij', t, self.inv_freq) + # Different from paper, but it uses a different permutation in order to obtain the same calculation + emb = torch.cat((freqs, freqs), dim=-1) + self.register_buffer('cos_cached', + emb.cos()[None, None, :, :].to(dtype), + persistent=False) + self.register_buffer('sin_cached', + emb.sin()[None, None, :, :].to(dtype), + persistent=False) + + +def rotate_half(x): + """Rotates half the hidden dims of the input.""" + x1 = x[..., :x.shape[-1] // 2] + x2 = x[..., x.shape[-1] // 2:] + return torch.cat((-x2, x1), dim=-1) + + +def apply_rotary_pos_emb(q, k, cos, sin, position_ids): + # The first two dimensions of cos and sin are always 1, so we can `squeeze` them. 
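+    # RoPE pairs each channel of q/k with its counterpart in the other half
+    # (see `rotate_half`) and rotates the pair by a position-dependent angle,
+    # so attention scores depend only on relative token positions.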
+    cos = cos.squeeze(1).squeeze(0)  # [seq_len, dim]
+    sin = sin.squeeze(1).squeeze(0)  # [seq_len, dim]
+    cos = cos[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
+    sin = sin[position_ids].unsqueeze(1)  # [bs, 1, seq_len, dim]
+    q_embed = (q * cos) + (rotate_half(q) * sin)
+    k_embed = (k * cos) + (rotate_half(k) * sin)
+    return q_embed, k_embed
+
+
+# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Attention
+# Add option for RoPE
+class GPT2Attention(nn.Module):
+
+    def __init__(self, config, is_cross_attention=False, layer_idx=None):
+        super().__init__()
+
+        max_positions = config.max_position_embeddings
+        self.register_buffer(
+            'bias',
+            torch.tril(
+                torch.ones((max_positions, max_positions),
+                           dtype=torch.bool)).view(1, 1, max_positions,
+                                                   max_positions),
+            persistent=False,
+        )
+        self.register_buffer('masked_bias',
+                             torch.tensor(-1e4),
+                             persistent=False)
+
+        self.embed_dim = config.hidden_size
+        self.num_heads = config.num_attention_heads
+        self.head_dim = self.embed_dim // self.num_heads
+        self.split_size = self.embed_dim
+        if self.head_dim * self.num_heads != self.embed_dim:
+            raise ValueError(
+                f'`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:'
+                f' {self.num_heads}).')
+
+        self.scale_attn_weights = config.scale_attn_weights
+        self.is_cross_attention = is_cross_attention
+
+        # Layer-wise attention scaling, reordering, and upcasting
+        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
+        self.layer_idx = layer_idx
+        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn
+
+        if self.is_cross_attention:
+            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
+            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
+        else:
+            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
+        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)
+
+        self.attn_dropout = nn.Dropout(config.attn_pdrop)
+        self.resid_dropout = nn.Dropout(config.resid_pdrop)
+
+        self.pruned_heads = set()
+
+        # `_init_rope` reads `self.config` and `self.max_position_embeddings`;
+        # store them explicitly since this module does not inherit them from a
+        # pretrained base class.
+        self.config = config
+        self.max_position_embeddings = max_positions
+        self.use_rope = config.use_rope
+        if self.use_rope:
+            self._init_rope()
+
+    def _init_rope(self):
+        if self.config.rope_scaling is None:
+            self.rotary_emb = LlamaRotaryEmbedding(
+                self.head_dim,
+                max_position_embeddings=self.max_position_embeddings)
+        else:
+            scaling_type = self.config.rope_scaling['type']
+            scaling_factor = self.config.rope_scaling['factor']
+            if scaling_type == 'linear':
+                self.rotary_emb = LlamaLinearScalingRotaryEmbedding(
+                    self.head_dim,
+                    max_position_embeddings=self.max_position_embeddings,
+                    scaling_factor=scaling_factor)
+            elif scaling_type == 'dynamic':
+                self.rotary_emb = LlamaDynamicNTKScalingRotaryEmbedding(
+                    self.head_dim,
+                    max_position_embeddings=self.max_position_embeddings,
+                    scaling_factor=scaling_factor)
+            else:
+                raise ValueError(f'Unknown RoPE scaling type {scaling_type}')
+
+    def prune_heads(self, heads):
+        if len(heads) == 0:
+            return
+        heads, index = find_pruneable_heads_and_indices(
+            heads, self.num_heads, self.head_dim, self.pruned_heads)
+        index_attn = torch.cat(
+            [index, index + self.split_size, index + (2 * self.split_size)])
+
+        # Prune conv1d layers
+        self.c_attn = prune_conv1d_layer(self.c_attn, index_attn, dim=1)
+        self.c_proj = prune_conv1d_layer(self.c_proj, index, dim=0)
+
+        # Update hyper params
+        self.split_size = (self.split_size //
+                           self.num_heads) * (self.num_heads - len(heads))
+        self.num_heads = self.num_heads - len(heads)
+        self.pruned_heads = self.pruned_heads.union(heads)
+
+    def _attn(self, query, key, value, attention_mask=None, head_mask=None):
+
attn_weights = torch.matmul(query, key.transpose(-1, -2)) + + if self.scale_attn_weights: + attn_weights = attn_weights / torch.full( + [], + value.size(-1)**0.5, + dtype=attn_weights.dtype, + device=attn_weights.device) + + # Layer-wise attention scaling + if self.scale_attn_by_inverse_layer_idx: + attn_weights = attn_weights / float(self.layer_idx + 1) + + if not self.is_cross_attention: + # if only "normal" attention layer implements causal mask + query_length, key_length = query.size(-2), key.size(-2) + causal_mask = self.bias[:, :, key_length - + query_length:key_length, :key_length] + mask_value = torch.finfo(attn_weights.dtype).min + # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. + # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` + mask_value = torch.full([], mask_value, + dtype=attn_weights.dtype).to( + attn_weights.device) + attn_weights = torch.where(causal_mask, + attn_weights.to(attn_weights.dtype), + mask_value) + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise + attn_weights = attn_weights.type(value.dtype) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def _upcast_and_reordered_attn(self, + query, + key, + value, + attention_mask=None, + head_mask=None): + # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM) + bsz, num_heads, q_seq_len, dk = query.size() + _, _, k_seq_len, _ = key.size() + + # Preallocate attn_weights for `baddbmm` + attn_weights = torch.empty(bsz * num_heads, + q_seq_len, + k_seq_len, + dtype=torch.float32, + device=query.device) + + # Compute Scale Factor + scale_factor = 1.0 + if self.scale_attn_weights: + scale_factor /= float(value.size(-1))**0.5 + + if self.scale_attn_by_inverse_layer_idx: + scale_factor /= float(self.layer_idx + 1) + + # Upcast (turn off torch.cuda.amp.autocast) and reorder (Scale K by 1 / root(dk)) + with torch.cuda.amp.autocast(enabled=False): + q, k = query.reshape(-1, q_seq_len, + dk), key.transpose(-1, -2).reshape( + -1, dk, k_seq_len) + attn_weights = torch.baddbmm(attn_weights, + q.float(), + k.float(), + beta=0, + alpha=scale_factor) + attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, + k_seq_len) + + if not self.is_cross_attention: + # if only "normal" attention layer implements causal mask + query_length, key_length = query.size(-2), key.size(-2) + causal_mask = self.bias[:, :, key_length - + query_length:key_length, :key_length] + mask_value = torch.finfo(attn_weights.dtype).min + # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`. 
+ # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device` + mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype).to( + attn_weights.device) + attn_weights = torch.where(causal_mask, attn_weights, mask_value) + + if attention_mask is not None: + # Apply the attention mask + attn_weights = attn_weights + attention_mask + + attn_weights = nn.functional.softmax(attn_weights, dim=-1) + + # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op if otherwise + if attn_weights.dtype != torch.float32: + raise RuntimeError( + 'Error with upcasting, attn_weights does not have dtype torch.float32' + ) + attn_weights = attn_weights.type(value.dtype) + attn_weights = self.attn_dropout(attn_weights) + + # Mask heads if we want to + if head_mask is not None: + attn_weights = attn_weights * head_mask + + attn_output = torch.matmul(attn_weights, value) + + return attn_output, attn_weights + + def _split_heads(self, tensor, num_heads, attn_head_size): + """Splits hidden_size dim into attn_head_size and num_heads.""" + new_shape = tensor.size()[:-1] + (num_heads, attn_head_size) + tensor = tensor.view(new_shape) + return tensor.permute(0, 2, 1, + 3) # (batch, head, seq_length, head_features) + + def _merge_heads(self, tensor, num_heads, attn_head_size): + """Merges attn_head_size dim and num_attn_heads dim into + hidden_size.""" + tensor = tensor.permute(0, 2, 1, 3).contiguous() + new_shape = tensor.size()[:-2] + (num_heads * attn_head_size, ) + return tensor.view(new_shape) + + def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: Optional[bool] = False, + ) -> Tuple[Union[torch.Tensor, Tuple[torch.Tensor]], ...]: + if encoder_hidden_states is not None: + if not hasattr(self, 'q_attn'): + raise ValueError( + 'If class is used as cross attention, the weights `q_attn` have to be defined. ' + 'Please make sure to instantiate class with `CausalAttention(..., is_cross_attention=True)`.' 
+ ) + + query = self.q_attn(hidden_states) + key, value = self.c_attn(encoder_hidden_states).split( + self.split_size, dim=2) + attention_mask = encoder_attention_mask + else: + query, key, value = self.c_attn(hidden_states).split( + self.split_size, dim=2) + + query = self._split_heads(query, self.num_heads, self.head_dim) + key = self._split_heads(key, self.num_heads, self.head_dim) + value = self._split_heads(value, self.num_heads, self.head_dim) + + # only add RoPE in self-attention + # add RoPE embedding before concating past kv (they already have RoPE) + if encoder_hidden_states is None and self.use_rope: + kv_seq_len = key.shape[-2] + if layer_past is not None: + kv_seq_len += layer_past[0].shape[-2] + cos, sin = self.rotary_emb(value, seq_len=kv_seq_len) + query, key = apply_rotary_pos_emb(query, key, cos, sin, + position_ids) + + if layer_past is not None: + past_key, past_value = layer_past + key = torch.cat((past_key, key), dim=-2) + value = torch.cat((past_value, value), dim=-2) + + if use_cache is True: + present = (key, value) + else: + present = None + + if self.reorder_and_upcast_attn: + attn_output, attn_weights = self._upcast_and_reordered_attn( + query, key, value, attention_mask, head_mask) + else: + attn_output, attn_weights = self._attn(query, key, value, + attention_mask, head_mask) + + attn_output = self._merge_heads(attn_output, self.num_heads, + self.head_dim) + attn_output = self.c_proj(attn_output) + attn_output = self.resid_dropout(attn_output) + + outputs = (attn_output, present) + if output_attentions: + outputs += (attn_weights, ) + + return outputs # a, present, (attentions) + + +# Copied from transformers.models.gpt2.modeling_gpt2.GPT2MLP +class GPT2MLP(nn.Module): + + def __init__(self, intermediate_size, config): + super().__init__() + embed_dim = config.hidden_size + self.c_fc = Conv1D(intermediate_size, embed_dim) + self.c_proj = Conv1D(embed_dim, intermediate_size) + self.act = ACT2FN[config.activation_function] + self.dropout = nn.Dropout(config.resid_pdrop) + + def forward( + self, hidden_states: Optional[Tuple[torch.FloatTensor]] + ) -> torch.FloatTensor: + hidden_states = self.c_fc(hidden_states) + hidden_states = self.act(hidden_states) + hidden_states = self.c_proj(hidden_states) + hidden_states = self.dropout(hidden_states) + return hidden_states + + +# Copied from transformers.models.gpt2.modeling_gpt2.GPT2Block +# Add option for RoPE +class GPT2Block(nn.Module): + + def __init__(self, config, layer_idx=None): + super().__init__() + hidden_size = config.hidden_size + inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size + + self.ln_1 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + self.attn = GPT2Attention(config, layer_idx=layer_idx) + self.ln_2 = nn.LayerNorm(hidden_size, eps=config.layer_norm_epsilon) + + if config.add_cross_attention: + self.crossattention = GPT2Attention(config, + is_cross_attention=True, + layer_idx=layer_idx) + self.ln_cross_attn = nn.LayerNorm(hidden_size, + eps=config.layer_norm_epsilon) + + self.mlp = GPT2MLP(inner_dim, config) + + def forward( + self, + hidden_states: Optional[Tuple[torch.FloatTensor]], + layer_past: Optional[Tuple[torch.Tensor]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = False, + output_attentions: 
Optional[bool] = False,
+    ) -> Union[Tuple[torch.Tensor], Optional[Tuple[torch.Tensor, Tuple[
+            torch.FloatTensor, ...]]]]:
+        residual = hidden_states
+        hidden_states = self.ln_1(hidden_states)
+        attn_outputs = self.attn(
+            hidden_states,
+            layer_past=layer_past,
+            attention_mask=attention_mask,
+            position_ids=position_ids,
+            head_mask=head_mask,
+            use_cache=use_cache,
+            output_attentions=output_attentions,
+        )
+        attn_output = attn_outputs[0]  # output_attn: a, present, (attentions)
+        outputs = attn_outputs[1:]
+        # residual connection
+        hidden_states = attn_output + residual
+
+        if encoder_hidden_states is not None:
+            # add one self-attention block for cross-attention
+            if not hasattr(self, 'crossattention'):
+                raise ValueError(
+                    f'If `encoder_hidden_states` are passed, {self} has to be instantiated with '
+                    'cross-attention layers by setting `config.add_cross_attention=True`'
+                )
+            residual = hidden_states
+            hidden_states = self.ln_cross_attn(hidden_states)
+            cross_attn_outputs = self.crossattention(
+                hidden_states,
+                attention_mask=attention_mask,
+                head_mask=head_mask,
+                encoder_hidden_states=encoder_hidden_states,
+                encoder_attention_mask=encoder_attention_mask,
+                output_attentions=output_attentions,
+            )
+            attn_output = cross_attn_outputs[0]
+            # residual connection
+            hidden_states = residual + attn_output
+            outputs = outputs + cross_attn_outputs[
+                2:]  # add cross attentions if we output attention weights
+
+        residual = hidden_states
+        hidden_states = self.ln_2(hidden_states)
+        feed_forward_hidden_states = self.mlp(hidden_states)
+        # residual connection
+        hidden_states = residual + feed_forward_hidden_states
+
+        if use_cache:
+            outputs = (hidden_states, ) + outputs
+        else:
+            outputs = (hidden_states, ) + outputs[1:]
+
+        return outputs  # hidden_states, present, (attentions, cross_attentions)
+
+
+class GPT2Model(nn.Module):
+
+    def __init__(self, config):
+        # nn.Module takes no config argument; keep a reference ourselves for
+        # `_init_weights` and `forward`.
+        super().__init__()
+        self.config = config
+
+        self.embed_dim = config.hidden_size
+
+        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
+        if not config.use_rope:
+            self.wpe = nn.Embedding(config.max_position_embeddings,
+                                    self.embed_dim)
+        else:
+            self.wpe = None
+
+        self.drop = nn.Dropout(config.embd_pdrop)
+        self.h = nn.ModuleList([
+            GPT2Block(config, layer_idx=i)
+            for i in range(config.num_hidden_layers)
+        ])
+        self.ln_f = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)
+
+        # Model parallel
+        self.model_parallel = False
+        self.device_map = None
+        self.gradient_checkpointing = False
+
+        # Initialize weights and apply final processing
+        self.apply(self._init_weights)
+
+    def _init_weights(self, module):
+        """Initialize the weights."""
+        if isinstance(module, (nn.Linear, Conv1D)):
+            # Slightly different from the TF version which uses truncated_normal for initialization
+            # cf https://github.com/pytorch/pytorch/pull/5617
+            module.weight.data.normal_(mean=0.0,
+                                       std=self.config.initializer_range)
+            if module.bias is not None:
+                module.bias.data.zero_()
+        elif isinstance(module, nn.Embedding):
+            module.weight.data.normal_(mean=0.0,
+                                       std=self.config.initializer_range)
+            if module.padding_idx is not None:
+                module.weight.data[module.padding_idx].zero_()
+        elif isinstance(module, nn.LayerNorm):
+            module.bias.data.zero_()
+            module.weight.data.fill_(1.0)
+
+        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
+        # > A modified initialization which accounts for the accumulation on the residual path with model depth.
Scale + # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers. + # > -- GPT-2 :: https://openai.com/blog/better-language-models/ + # + # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py + for name, p in module.named_parameters(): + if 'c_proj' in name and 'weight' in name: + # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block + p.data.normal_(mean=0.0, + std=(self.config.initializer_range / + math.sqrt(2 * self.config.n_layer))) + + def _set_gradient_checkpointing(self, module, value=False): + if isinstance(module, GPT2Model): + module.gradient_checkpointing = value + + def get_input_embeddings(self): + return self.wte + + def set_input_embeddings(self, new_embeddings): + self.wte = new_embeddings + + # Copied from transformers.models.gpt2.modeling_gpt2.GPT2Model.forward + def forward( + self, + input_ids: Optional[torch.LongTensor] = None, + past_key_values: Optional[Tuple[Tuple[torch.Tensor]]] = None, + attention_mask: Optional[torch.FloatTensor] = None, + token_type_ids: Optional[torch.LongTensor] = None, + position_ids: Optional[torch.LongTensor] = None, + head_mask: Optional[torch.FloatTensor] = None, + inputs_embeds: Optional[torch.FloatTensor] = None, + encoder_hidden_states: Optional[torch.Tensor] = None, + encoder_attention_mask: Optional[torch.FloatTensor] = None, + use_cache: Optional[bool] = None, + output_attentions: Optional[bool] = None, + output_hidden_states: Optional[bool] = None, + return_dict: Optional[bool] = None, + ) -> Union[Tuple, BaseModelOutputWithPastAndCrossAttentions]: + output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions + output_hidden_states = (output_hidden_states + if output_hidden_states is not None else + self.config.output_hidden_states) + use_cache = use_cache if use_cache is not None else self.config.use_cache + return_dict = return_dict if return_dict is not None else self.config.use_return_dict + + if input_ids is not None and inputs_embeds is not None: + raise ValueError( + 'You cannot specify both input_ids and inputs_embeds at the same time' + ) + elif input_ids is not None: + input_shape = input_ids.size() + input_ids = input_ids.view(-1, input_shape[-1]) + batch_size = input_ids.shape[0] + elif inputs_embeds is not None: + input_shape = inputs_embeds.size()[:-1] + batch_size = inputs_embeds.shape[0] + else: + raise ValueError( + 'You have to specify either input_ids or inputs_embeds') + + device = input_ids.device if input_ids is not None else inputs_embeds.device + + if token_type_ids is not None: + token_type_ids = token_type_ids.view(-1, input_shape[-1]) + if position_ids is not None: + position_ids = position_ids.view(-1, input_shape[-1]) + + if past_key_values is None: + past_length = 0 + past_key_values = tuple([None] * len(self.h)) + else: + past_length = past_key_values[0][0].size(-2) + if position_ids is None: + position_ids = torch.arange(past_length, + input_shape[-1] + past_length, + dtype=torch.long, + device=device) + position_ids = position_ids.unsqueeze(0).view(-1, input_shape[-1]) + + # GPT2Attention mask. + if attention_mask is not None: + if batch_size <= 0: + raise ValueError('batch_size has to be defined and > 0') + attention_mask = attention_mask.view(batch_size, -1) + # We create a 3D attention mask from a 2D tensor mask. 
+ # Sizes are [batch_size, 1, 1, to_seq_length] + # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length] + # this attention mask is more simple than the triangular masking of causal attention + # used in OpenAI GPT, we just need to prepare the broadcast dimension here. + attention_mask = attention_mask[:, None, None, :] + + # Since attention_mask is 1.0 for positions we want to attend and 0.0 for + # masked positions, this operation will create a tensor which is 0.0 for + # positions we want to attend and the dtype's smallest value for masked positions. + # Since we are adding it to the raw scores before the softmax, this is + # effectively the same as removing these entirely. + attention_mask = attention_mask.to( + dtype=self.dtype) # fp16 compatibility + attention_mask = (1.0 - attention_mask) * torch.finfo( + self.dtype).min + + # If a 2D or 3D attention mask is provided for the cross-attention + # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length] + if self.config.add_cross_attention and encoder_hidden_states is not None: + encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size( + ) + encoder_hidden_shape = (encoder_batch_size, + encoder_sequence_length) + if encoder_attention_mask is None: + encoder_attention_mask = torch.ones(encoder_hidden_shape, + device=device) + encoder_attention_mask = self.invert_attention_mask( + encoder_attention_mask) + else: + encoder_attention_mask = None + + # Prepare head mask if needed + # 1.0 in head_mask indicate we keep the head + # attention_probs has shape bsz x n_heads x N x N + # head_mask has shape n_layer x batch x n_heads x N x N + head_mask = self.get_head_mask(head_mask, self.config.n_layer) + + if inputs_embeds is None: + inputs_embeds = self.wte(input_ids) + if self.wpe: + position_embeds = self.wpe(position_ids) + hidden_states = inputs_embeds + position_embeds + else: + hidden_states = inputs_embeds + + if token_type_ids is not None: + token_type_embeds = self.wte(token_type_ids) + hidden_states = hidden_states + token_type_embeds + + hidden_states = self.drop(hidden_states) + + output_shape = input_shape + (hidden_states.size(-1), ) + + if self.gradient_checkpointing and self.training: + if use_cache: + logger.warning_once( + '`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`...' 
+ ) + use_cache = False + + presents = () if use_cache else None + all_self_attentions = () if output_attentions else None + all_cross_attentions = ( + ) if output_attentions and self.config.add_cross_attention else None + all_hidden_states = () if output_hidden_states else None + for i, (block, layer_past) in enumerate(zip(self.h, past_key_values)): + # Model parallel + if self.model_parallel: + torch.cuda.set_device(hidden_states.device) + # Ensure layer_past is on same device as hidden_states (might not be correct) + if layer_past is not None: + layer_past = tuple( + past_state.to(hidden_states.device) + for past_state in layer_past) + # Ensure that attention_mask is always on the same device as hidden_states + if attention_mask is not None: + attention_mask = attention_mask.to(hidden_states.device) + if isinstance(head_mask, torch.Tensor): + head_mask = head_mask.to(hidden_states.device) + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if self.gradient_checkpointing and self.training: + + def create_custom_forward(module): + + def custom_forward(*inputs): + # None for past_key_value + return module(*inputs, use_cache, output_attentions) + + return custom_forward + + outputs = torch.utils.checkpoint.checkpoint( + create_custom_forward(block), + hidden_states, + None, + attention_mask, + position_ids, + head_mask[i], + encoder_hidden_states, + encoder_attention_mask, + ) + else: + outputs = block( + hidden_states, + layer_past=layer_past, + attention_mask=attention_mask, + position_ids=position_ids, + head_mask=head_mask[i], + encoder_hidden_states=encoder_hidden_states, + encoder_attention_mask=encoder_attention_mask, + use_cache=use_cache, + output_attentions=output_attentions, + ) + + hidden_states = outputs[0] + if use_cache is True: + presents = presents + (outputs[1], ) + + if output_attentions: + all_self_attentions = all_self_attentions + ( + outputs[2 if use_cache else 1], ) + if self.config.add_cross_attention: + all_cross_attentions = all_cross_attentions + ( + outputs[3 if use_cache else 2], ) + + # Model Parallel: If it's the last layer for that device, put things on the next device + if self.model_parallel: + for k, v in self.device_map.items(): + if i == v[-1] and 'cuda:' + str(k) != self.last_device: + hidden_states = hidden_states.to('cuda:' + str(k + 1)) + + hidden_states = self.ln_f(hidden_states) + + hidden_states = hidden_states.view(output_shape) + # Add last hidden state + if output_hidden_states: + all_hidden_states = all_hidden_states + (hidden_states, ) + + if not return_dict: + return tuple(v for v in [ + hidden_states, presents, all_hidden_states, + all_self_attentions, all_cross_attentions + ] if v is not None) + + return BaseModelOutputWithPastAndCrossAttentions( + last_hidden_state=hidden_states, + past_key_values=presents, + hidden_states=all_hidden_states, + attentions=all_self_attentions, + cross_attentions=all_cross_attentions, + ) diff --git a/models/LEO/model/utils.py b/models/LEO/model/utils.py new file mode 100755 index 0000000..edba35c --- /dev/null +++ b/models/LEO/model/utils.py @@ -0,0 +1,139 @@ +import contextlib +import copy + +import einops +import torch +import torch.nn as nn +import torch.nn.functional as F + + +def disabled_train(self, mode=True): + """Overwrite model.train with this function to make sure train/eval mode + does not change anymore.""" + return self + + +def maybe_autocast(model, dtype='bf16', enabled=True): + # if on cpu, don't use autocast + # if on gpu, use autocast with dtype if 
provided, otherwise fall back to torch.float32.
+    # Typical usage: `with maybe_autocast(model, 'bf16'): out = model(x)`.
+    enable_autocast = model.device != torch.device('cpu')
+
+    if dtype == 'bf16':
+        dtype = torch.bfloat16
+    elif dtype == 'fp16':
+        dtype = torch.float16
+    else:
+        dtype = torch.float32
+
+    if enable_autocast:
+        return torch.cuda.amp.autocast(dtype=dtype, enabled=enabled)
+    else:
+        return contextlib.nullcontext()
+
+
+def _init_weights_bert(module, std=0.02):
+    """Huggingface transformer weight initialization, most commonly for bert
+    initialization."""
+    if isinstance(module, nn.Linear):
+        # Slightly different from the TF version which uses truncated_normal for initialization
+        # cf https://github.com/pytorch/pytorch/pull/5617
+        module.weight.data.normal_(mean=0.0, std=std)
+        if module.bias is not None:
+            module.bias.data.zero_()
+    elif isinstance(module, nn.Embedding):
+        module.weight.data.normal_(mean=0.0, std=std)
+        if module.padding_idx is not None:
+            module.weight.data[module.padding_idx].zero_()
+    elif isinstance(module, nn.LayerNorm):
+        module.bias.data.zero_()
+        module.weight.data.fill_(1.0)
+
+
+#########################################################
+# General modules helpers
+#########################################################
+def get_activation_fn(activation_type):
+    if activation_type not in ['relu', 'gelu', 'glu']:
+        raise RuntimeError(
+            f'activation function currently supports relu/gelu/glu, not {activation_type}'
+        )
+    return getattr(F, activation_type)
+
+
+def get_mlp_head(input_size, hidden_size, output_size, dropout=0):
+    return nn.Sequential(*[
+        nn.Linear(input_size, hidden_size),
+        nn.ReLU(),
+        nn.LayerNorm(hidden_size, eps=1e-12),
+        nn.Dropout(dropout),
+        nn.Linear(hidden_size, output_size)
+    ])
+
+
+def layer_repeat(module, N):
+    return nn.ModuleList([copy.deepcopy(module)
+                          for _ in range(N - 1)] + [module])
+
+
+#########################################################
+# Specific modules helpers
+#########################################################
+def calc_pairwise_locs(obj_centers,
+                       obj_whls,
+                       eps=1e-10,
+                       pairwise_rel_type='center',
+                       spatial_dist_norm=True,
+                       spatial_dim=5):
+    if pairwise_rel_type == 'mlp':
+        obj_locs = torch.cat([obj_centers, obj_whls], 2)
+        pairwise_locs = torch.cat([
+            einops.repeat(obj_locs, 'b l d -> b l x d', x=obj_locs.size(1)),
+            einops.repeat(obj_locs, 'b l d -> b x l d', x=obj_locs.size(1))
+        ],
+                                  dim=3)
+        return pairwise_locs
+
+    pairwise_locs = einops.repeat(obj_centers, 'b l d -> b l 1 d') \
+        - einops.repeat(obj_centers, 'b l d -> b 1 l d')
+    pairwise_dists = torch.sqrt(torch.sum(pairwise_locs**2, 3) +
+                                eps)  # (b, l, l)
+    if spatial_dist_norm:
+        max_dists = torch.max(pairwise_dists.view(pairwise_dists.size(0), -1),
+                              dim=1)[0]
+        norm_pairwise_dists = pairwise_dists / einops.repeat(
+            max_dists, 'b -> b 1 1')
+    else:
+        norm_pairwise_dists = pairwise_dists
+
+    if spatial_dim == 1:
+        return norm_pairwise_dists.unsqueeze(3)
+
+    pairwise_dists_2d = torch.sqrt(
+        torch.sum(pairwise_locs[..., :2]**2, 3) + eps)
+    if pairwise_rel_type == 'center':
+        pairwise_locs = torch.stack([
+            norm_pairwise_dists, pairwise_locs[..., 2] / pairwise_dists,
+            pairwise_dists_2d / pairwise_dists, pairwise_locs[..., 1] /
+            pairwise_dists_2d, pairwise_locs[..., 0] / pairwise_dists_2d
+        ],
+                                    dim=3)
+    elif pairwise_rel_type == 'vertical_bottom':
+        bottom_centers = torch.clone(obj_centers)
+        bottom_centers[:, :, 2] -= obj_whls[:, :, 2]
+        bottom_pairwise_locs = einops.repeat(bottom_centers, 'b l d -> b l 1 d') \
+            - einops.repeat(bottom_centers, 'b l d -> b 1 l d')
+        bottom_pairwise_dists = torch.sqrt(
+            torch.sum(bottom_pairwise_locs**2, 3) + eps)  # (b, l, l)
+        bottom_pairwise_dists_2d = torch.sqrt(
+            torch.sum(bottom_pairwise_locs[..., :2]**2, 3) + eps)
+        pairwise_locs = torch.stack([
+            norm_pairwise_dists, bottom_pairwise_locs[..., 2] /
+            bottom_pairwise_dists, bottom_pairwise_dists_2d /
+            bottom_pairwise_dists, pairwise_locs[..., 1] / pairwise_dists_2d,
+            pairwise_locs[..., 0] / pairwise_dists_2d
+        ],
+                                    dim=3)
+
+    if spatial_dim == 4:
+        pairwise_locs = pairwise_locs[..., 1:]
+    return pairwise_locs
diff --git a/models/LEO/model/vision2d.py b/models/LEO/model/vision2d.py
new file mode 100644
index 0000000..98e5114
--- /dev/null
+++ b/models/LEO/model/vision2d.py
@@ -0,0 +1,229 @@
+import os
+
+import numpy as np
+import timm
+import torch
+import torch.nn as nn
+from accelerate.logging import get_logger
+from einops import rearrange
+from model.build import MODULE_REGISTRY
+from model.utils import disabled_train
+
+logger = get_logger(__name__)
+
+
+def simple_conv_and_linear_weights_init(m):
+    if type(m) in [
+            nn.Conv1d,
+            nn.Conv2d,
+            nn.Conv3d,
+            nn.ConvTranspose1d,
+            nn.ConvTranspose2d,
+            nn.ConvTranspose3d,
+    ]:
+        weight_shape = list(m.weight.data.size())
+        fan_in = np.prod(weight_shape[1:4])
+        fan_out = np.prod(weight_shape[2:4]) * weight_shape[0]
+        w_bound = np.sqrt(6.0 / (fan_in + fan_out))
+        m.weight.data.uniform_(-w_bound, w_bound)
+        if m.bias is not None:
+            m.bias.data.fill_(0)
+    elif type(m) == nn.Linear:
+        simple_linear_weights_init(m)
+
+
+def simple_linear_weights_init(m):
+    if type(m) == nn.Linear:
+        weight_shape = list(m.weight.data.size())
+        fan_in = weight_shape[1]
+        fan_out = weight_shape[0]
+        w_bound = np.sqrt(6.0 / (fan_in + fan_out))
+        m.weight.data.uniform_(-w_bound, w_bound)
+        if m.bias is not None:
+            m.bias.data.fill_(0)
+
+
+@MODULE_REGISTRY.register()
+class GridFeatureExtractor2D(nn.Module):
+
+    def __init__(self, cfg):
+        super().__init__()
+
+        init_func_name = '_'.join(
+            [cfg.backbone_name, cfg.backbone_pretrain_dataset])
+        init_func = globals().get(init_func_name)
+        if init_func and callable(init_func):
+            self.backbone = init_func(pretrained=cfg.use_pretrain,
+                                      freeze=cfg.freeze)
+        else:
+            raise NotImplementedError(
+                f'Backbone2D does not support {init_func_name}')
+
+        self.pooling = cfg.pooling
+        if self.pooling:
+            if self.pooling == 'avg':
+                self.pooling_layers = nn.Sequential(
+                    nn.AdaptiveAvgPool2d(output_size=(1, 1)), nn.Flatten())
+                self.out_channels = self.backbone.out_channels
+            elif self.pooling == 'conv':
+                self.pooling_layers = nn.Sequential(
+                    nn.Conv2d(self.backbone.out_channels, 64, 1),
+                    nn.ReLU(inplace=True), nn.Conv2d(64, 32, 1), nn.Flatten())
+                self.pooling_layers.apply(simple_conv_and_linear_weights_init)
+                self.out_channels = 32 * 7 * 7  # hard-coded for 224x224 inputs (7x7 feature map)
+            elif self.pooling in ['attn', 'attention']:
+                self.visual_attention = nn.Sequential(
+                    nn.Conv2d(self.backbone.out_channels,
+                              self.backbone.out_channels, 1),
+                    nn.ReLU(inplace=True),
+                    nn.Conv2d(self.backbone.out_channels,
+                              self.backbone.out_channels, 1),
+                )
+                self.visual_attention.apply(
+                    simple_conv_and_linear_weights_init)
+
+                def _attention_pooling(x):
+                    B, C, H, W = x.size()
+                    attn = self.visual_attention(x)
+                    attn = attn.view(B, C, -1)
+                    x = x.view(B, C, -1)
+                    attn = attn.softmax(dim=-1)
+                    # weight the features by the attention scores before
+                    # summing over the spatial positions
+                    x = torch.einsum('b c n, b c n -> b c', attn, x)
+                    return x
+
+                self.pooling_layers = _attention_pooling
+                self.out_channels = self.backbone.out_channels
+            else:
+                raise NotImplementedError(
+                    f'Backbone2D does not support {self.pooling} pooling')
+        else:
+            self.out_channels =
self.backbone.out_channels + + logger.info(f'Build Backbone2D: {init_func_name}, ' + + f'pretrain = {cfg.use_pretrain}, freeze = {cfg.freeze}, ' + + f'pooling = {self.pooling if self.pooling else None}') + + def forward(self, x): + if self.pooling: + x = self.backbone(x, flat_output=False) + x = self.pooling_layers(x).unsqueeze(1) + return x + else: + return self.backbone(x, flat_output=True) + + +class Backbone2DWrapper(nn.Module): + + def __init__(self, model, tag, freeze=True): + super().__init__() + self.model = model + self.tag = tag + self.freeze = freeze + if 'convnext' in tag: + self.out_channels = 1024 + elif 'swin' in tag: + self.out_channels = 1024 + elif 'vit' in tag: + self.out_channels = 768 + elif 'resnet' in tag: + self.out_channels = 2048 + else: + raise NotImplementedError + + if freeze: + for param in self.parameters(): + param.requires_grad = False + self.eval() + self.train = disabled_train + + def forward_normal(self, x, flat_output=False): + feat = self.model.forward_features(x) + if 'swin' in self.tag: + feat = rearrange(feat, 'b h w c -> b c h w') + if 'vit_base_32_timm_laion2b' in self.tag or 'vit_base_32_timm_openai' in self.tag: + # TODO: [CLS] is prepended to the patches. + feat = rearrange(feat[:, 1:], 'b (h w) c -> b c h w', h=7) + if flat_output: + feat = rearrange(feat, 'b c h w -> b (h w) c') + return feat + + @torch.no_grad() + def forward_frozen(self, x, flat_output=False): + return self.forward_normal(x, flat_output) + + def forward(self, x, flat_output=False): + if self.freeze: + return self.forward_frozen(x, flat_output) + else: + return self.forward_normal(x, flat_output) + + +# 1024x7x7 or 49x1024 +def convnext_base_in1k(pretrained=False, freeze=True, **kwargs): + return Backbone2DWrapper(timm.create_model('convnext_base', + pretrained=pretrained), + 'convnext_base_in1k', + freeze=freeze) + + +# 1024x7x7 or 49x1024 +def convnext_base_in22k(pretrained=False, freeze=True, **kwargs): + return Backbone2DWrapper(timm.create_model('convnext_base_in22k', + pretrained=pretrained), + 'convnext_base_in22k', + freeze=freeze) + + +# 1024x7x7 or 49x1024 +def convnext_base_laion2b(pretrained=False, freeze=True, **kwargs): + m = timm.create_model('convnext_base.clip_laion2b', pretrained=pretrained) + if kwargs.get('reset_clip_s2b2'): + logger.debug( + 'Resetting the last conv layer of convnext-base to random init.') + s = m.state_dict() + for i in s.keys(): + if 'stages.3.blocks.2' in i and ('weight' in i or 'bias' in i): + s[i].normal_() + m.load_state_dict(s, strict=True) + + return Backbone2DWrapper(m, 'convnext_base_laion2b', freeze=freeze) + + +# 1024x7x7 or 49x1024 +def swin_base_in1k(pretrained=False, freeze=True, **kwargs): + return Backbone2DWrapper(timm.create_model('swin_base_patch4_window7_224', + pretrained=pretrained), + 'swin_base_timm_in1k', + freeze=freeze) + + +# 1024x7x7 or 49x1024 +def swin_base_in22k(pretrained=False, freeze=True, **kwargs): + return Backbone2DWrapper(timm.create_model( + 'swin_base_patch4_window7_224_in22k', pretrained=pretrained), + 'swin_base_timm_in22k', + freeze=freeze) + + +# 768x7x7 or 49x768 +def vit_b_32_laion2b(pretrained=False, freeze=True, **kwargs): + return Backbone2DWrapper(timm.create_model( + 'vit_base_patch32_clip_224.laion2b', pretrained=pretrained), + 'vit_base_32_timm_laion2b', + freeze=freeze) + + +# 768x7x7 or 49x768 +def vit_b_32_openai(pretrained=False, freeze=True, **kwargs): + return Backbone2DWrapper(timm.create_model( + 'vit_base_patch32_clip_224.openai', pretrained=pretrained), + 
'vit_base_32_timm_openai', + freeze=freeze) + + +# 2048x7x7 or 49x2048 +def resnet_50_in1k(pretrained=False, freeze=True, **kwargs): + return Backbone2DWrapper(timm.create_model('resnet50.gluon_in1k', + pretrained=pretrained), + 'resnet50_timm_in1k', + freeze=freeze) diff --git a/models/LEO/model/vision3d.py b/models/LEO/model/vision3d.py new file mode 100644 index 0000000..47460c0 --- /dev/null +++ b/models/LEO/model/vision3d.py @@ -0,0 +1,220 @@ +import numpy as np +import torch +import torch.nn as nn +from accelerate.logging import get_logger +from model.build import MODULE_REGISTRY +from model.pcd_backbone import PointcloudBackbone +from model.transformers import (TransformerEncoderLayer, + TransformerSpatialEncoderLayer) +from model.utils import _init_weights_bert, calc_pairwise_locs, layer_repeat + +logger = get_logger(__name__) + + +def generate_fourier_features(pos, + num_bands=10, + max_freq=15, + concat_pos=True, + sine_only=False): + # Input: B, N, C + # Output: B, N, C' + batch_size = pos.shape[0] + device = pos.device + + min_freq = 1.0 + # Nyquist frequency at the target resolution: + freq_bands = torch.linspace(start=min_freq, + end=max_freq, + steps=num_bands, + device=device) + + # Get frequency bands for each spatial dimension. + # Output is size [n, d * num_bands] + per_pos_features = pos.unsqueeze(-1).repeat(1, 1, 1, + num_bands) * freq_bands + per_pos_features = torch.reshape( + per_pos_features, + [batch_size, -1, np.prod(per_pos_features.shape[2:])]) + if sine_only: + # Output is size [n, d * num_bands] + per_pos_features = torch.sin(np.pi * (per_pos_features)) + else: + # Output is size [n, 2 * d * num_bands] + per_pos_features = torch.cat([ + torch.sin(np.pi * per_pos_features), + torch.cos(np.pi * per_pos_features) + ], + dim=-1) + # Concatenate the raw input positions. + if concat_pos: + # Adds d bands to the encoding. 
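+        # Shape note: with sin+cos the encoding has 2 * d * num_bands dims, so
+        # concatenating the raw positions gives d * (2 * num_bands + 1) in
+        # total, e.g. 3 * 21 = 63 for a 3-D input with num_bands=10 (the value
+        # that cfg.fourier_size must match for OSE3D.orient_encoder below).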
+ per_pos_features = torch.cat( + [pos, per_pos_features.expand(batch_size, -1, -1)], dim=-1) + return per_pos_features + + +@MODULE_REGISTRY.register() +class OSE3D(nn.Module): + # Open-vocabulary, Spatial-attention, Embodied-token, 3D-agent + def __init__(self, cfg): + super().__init__() + self.use_spatial_attn = cfg.use_spatial_attn # spatial attention + self.use_embodied_token = cfg.use_embodied_token # embodied token + hidden_dim = cfg.hidden_dim + + # pcd backbone + self.obj_encoder = PointcloudBackbone(cfg.backbone) + self.obj_proj = nn.Linear(self.obj_encoder.out_dim, hidden_dim) + + # embodied token + if self.use_embodied_token: + self.anchor_feat = nn.Parameter(torch.zeros(1, 1, hidden_dim)) + self.anchor_size = nn.Parameter(torch.ones(1, 1, 3)) + self.orient_encoder = nn.Linear(cfg.fourier_size, hidden_dim) + self.obj_type_embed = nn.Embedding(2, hidden_dim) + + # spatial encoder + if self.use_spatial_attn: + spatial_encoder_layer = TransformerSpatialEncoderLayer( + d_model=hidden_dim, + nhead=cfg.spatial_encoder.num_attention_heads, + dim_feedforward=cfg.spatial_encoder.dim_feedforward, + dropout=cfg.spatial_encoder.dropout, + activation=cfg.spatial_encoder.activation, + spatial_dim=cfg.spatial_encoder.spatial_dim, + spatial_multihead=cfg.spatial_encoder.spatial_multihead, + spatial_attn_fusion=cfg.spatial_encoder.spatial_attn_fusion, + ) + else: + spatial_encoder_layer = TransformerEncoderLayer( + d_model=hidden_dim, + nhead=cfg.spatial_encoder.num_attention_heads, + dim_feedforward=cfg.spatial_encoder.dim_feedforward, + dropout=cfg.spatial_encoder.dropout, + activation=cfg.spatial_encoder.activation, + ) + + self.spatial_encoder = layer_repeat( + spatial_encoder_layer, + cfg.spatial_encoder.num_layers, + ) + self.pairwise_rel_type = cfg.spatial_encoder.pairwise_rel_type + self.spatial_dist_norm = cfg.spatial_encoder.spatial_dist_norm + self.spatial_dim = cfg.spatial_encoder.spatial_dim + self.obj_loc_encoding = cfg.spatial_encoder.obj_loc_encoding + + # location encoding + if self.obj_loc_encoding in ['same_0', 'same_all']: + num_loc_layers = 1 + elif self.obj_loc_encoding == 'diff_all': + num_loc_layers = cfg.spatial_encoder.num_layers + + loc_layer = nn.Sequential( + nn.Linear(cfg.spatial_encoder.dim_loc, hidden_dim), + nn.LayerNorm(hidden_dim), + ) + self.loc_layers = layer_repeat(loc_layer, num_loc_layers) + + # logger.info("Build 3D module: OSE3D") + + # only initialize spatial encoder and loc layers + self.spatial_encoder.apply(_init_weights_bert) + self.loc_layers.apply(_init_weights_bert) + + if self.use_embodied_token: + nn.init.normal_(self.anchor_feat, std=0.02) + + @property + def device(self): + return list(self.parameters())[0].device + + def forward(self, data_dict): + """data_dict requires keys: + + obj_fts: (B, N, P, 6), xyz + rgb + obj_masks: (B, N), 1 valid and 0 masked + obj_locs: (B, N, 6), xyz + whd + anchor_locs: (B, 3) + anchor_orientation: (B, C) + """ + + obj_feats = self.obj_encoder(data_dict['obj_fts']) + obj_feats = self.obj_proj(obj_feats) + obj_masks = ~data_dict[ + 'obj_masks'] # flipped due to different convention of TransformerEncoder + + B, N = obj_feats.shape[:2] + device = obj_feats.device + + obj_type_ids = torch.zeros((B, N), dtype=torch.long, device=device) + obj_type_embeds = self.obj_type_embed(obj_type_ids) + + if self.use_embodied_token: + # anchor feature + anchor_orient = data_dict['anchor_orientation'].unsqueeze(1) + anchor_orient_feat = self.orient_encoder( + generate_fourier_features(anchor_orient)) + anchor_feat = 
self.anchor_feat + anchor_orient_feat + anchor_mask = torch.zeros((B, 1), dtype=bool, device=device) + + # anchor loc (3) + size (3) + anchor_loc = torch.cat([ + data_dict['anchor_locs'].unsqueeze(1), + self.anchor_size.expand(B, -1, -1).to(device) + ], + dim=-1) + + # anchor type + anchor_type_id = torch.ones((B, 1), + dtype=torch.long, + device=device) + anchor_type_embed = self.obj_type_embed(anchor_type_id) + + # fuse anchor and objs + all_obj_feats = torch.cat([anchor_feat, obj_feats], dim=1) + all_obj_masks = torch.cat((anchor_mask, obj_masks), dim=1) + + all_obj_locs = torch.cat([anchor_loc, data_dict['obj_locs']], + dim=1) + all_obj_type_embeds = torch.cat( + (anchor_type_embed, obj_type_embeds), dim=1) + + else: + all_obj_feats = obj_feats + all_obj_masks = obj_masks + + all_obj_locs = data_dict['obj_locs'] + all_obj_type_embeds = obj_type_embeds + + all_obj_feats = all_obj_feats + all_obj_type_embeds + + # call spatial encoder + if self.use_spatial_attn: + pairwise_locs = calc_pairwise_locs( + all_obj_locs[:, :, :3], + all_obj_locs[:, :, 3:], + pairwise_rel_type=self.pairwise_rel_type, + spatial_dist_norm=self.spatial_dist_norm, + spatial_dim=self.spatial_dim, + ) + + for i, pc_layer in enumerate(self.spatial_encoder): + if self.obj_loc_encoding == 'diff_all': + query_pos = self.loc_layers[i](all_obj_locs) + else: + query_pos = self.loc_layers[0](all_obj_locs) + if not (self.obj_loc_encoding == 'same_0' and i > 0): + all_obj_feats = all_obj_feats + query_pos + + if self.use_spatial_attn: + all_obj_feats, _ = pc_layer(all_obj_feats, + pairwise_locs, + tgt_key_padding_mask=all_obj_masks) + else: + all_obj_feats, _ = pc_layer(all_obj_feats, + tgt_key_padding_mask=all_obj_masks) + + data_dict['obj_tokens'] = all_obj_feats + data_dict['obj_masks'] = ~all_obj_masks + + return data_dict diff --git a/models/LEO/requirements.txt b/models/LEO/requirements.txt new file mode 100644 index 0000000..8c85bd5 --- /dev/null +++ b/models/LEO/requirements.txt @@ -0,0 +1,24 @@ +accelerate==0.26.1 +easydict==1.10 +einops==0.6.1 +fschat==0.2.7 +fvcore==0.1.5.post20221221 +hydra-core==1.3.2 +jsonlines==3.1.0 +nltk==3.8.1 +numpy==1.25.0 +numpy-quaternion +omegaconf==2.3.0 +openai-clip +opencv-python==4.7.0.72 +pandas==2.0.2 +scipy==1.11.1 +sentence-transformers==2.2.2 +sentencepiece==0.1.99 +submitit==1.4.5 +timm==0.9.7 +tokenizers==0.13.3 +transformers==4.28.1 +transforms3d +trimesh==3.22.3 +wandb==0.15.4 diff --git a/models/LEO/run.py b/models/LEO/run.py new file mode 100755 index 0000000..59a336a --- /dev/null +++ b/models/LEO/run.py @@ -0,0 +1,38 @@ +import os +from datetime import datetime + +import common.io_utils as iu +import hydra +from accelerate.logging import get_logger +from common.misc import rgetattr +from trainer.build import build_trainer + +logger = get_logger(__name__) + + +@hydra.main(config_path='configs', config_name='default', version_base=None) +def main(cfg): + os.environ[ + 'TOKENIZERS_PARALLELISM'] = 'true' # suppress hf tokenizer warning + print(cfg.num_gpu) + naming_keys = [cfg.name] + for name in cfg.naming_keywords: + key = str(rgetattr(cfg, name)) + if key: + naming_keys.append(key) + exp_name = '_'.join(naming_keys) + + # Record the experiment + cfg.exp_dir = os.path.join( + cfg.base_dir, exp_name, + f"{datetime.now().strftime('%Y-%m-%d-%H:%M:%S')}" + if 'time' in cfg.naming_keywords else '') + iu.make_dir(cfg.exp_dir) + iu.save_yaml(cfg, os.path.join(cfg.exp_dir, 'config.json')) + + trainer = build_trainer(cfg) + trainer.run() + + +if __name__ == '__main__': + main() 
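For context, `rgetattr` (imported from `common.misc`, which is not part of this diff) resolves dotted attribute paths such as `task.leomix.dataset` on the hydra config. A minimal sketch of the experiment-naming logic above, using the standard recursive-getattr recipe and a hypothetical flat config, assuming the real helper behaves the same way:

```python
import functools


def rgetattr(obj, attr, *default):
    """Recursive getattr: walks dotted attribute paths like 'training.optim.name'."""

    def _getattr(o, name):
        return getattr(o, name, *default)

    return functools.reduce(_getattr, attr.split('.'), obj)


class Cfg:  # hypothetical stand-in for the hydra config
    name = 'leo'
    note = 'tuning_noact'
    naming_keywords = ['note']


cfg = Cfg()
naming_keys = [cfg.name]
for name in cfg.naming_keywords:
    key = str(rgetattr(cfg, name))
    if key:
        naming_keys.append(key)
print('_'.join(naming_keys))  # -> leo_tuning_noact
```

Under `cfg.base_dir`, the run is then recorded in `<exp_name>/<timestamp>` as `main()` above shows.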
diff --git a/models/LEO/scripts/test_tuning_mmscan.sh b/models/LEO/scripts/test_tuning_mmscan.sh
new file mode 100644
index 0000000..b1e2ce2
--- /dev/null
+++ b/models/LEO/scripts/test_tuning_mmscan.sh
@@ -0,0 +1,16 @@
+export WANDB_MODE=offline
+export MASTER_PORT=9604
+# accelerate
+python launch.py --name leo_tuning \
+    --mode accelerate \
+    --qos lv0b \
+    --mem_per_gpu 100 \
+    --time 48 \
+    --config configs/default_val.yaml \
+    --port 2050 \
+    --gpu_per_node 4 \
+    --num_nodes 1 \
+    --partition HGX \
+    task=tuning_noact \
+    note=tuning_noact \
+    pretrained_ckpt_path=path/to/model_last.pth \
diff --git a/models/LEO/scripts/train_tuning_mmscan.sh b/models/LEO/scripts/train_tuning_mmscan.sh
new file mode 100644
index 0000000..3fc5ab4
--- /dev/null
+++ b/models/LEO/scripts/train_tuning_mmscan.sh
@@ -0,0 +1,16 @@
+export WANDB_MODE=offline
+export MASTER_PORT=9604
+# accelerate
+python launch.py --name leo_tuning \
+    --mode accelerate \
+    --qos lv0b \
+    --mem_per_gpu 100 \
+    --time 48 \
+    --config configs/default_train.yaml \
+    --port 2050 \
+    --gpu_per_node 4 \
+    --num_nodes 1 \
+    --partition HGX \
+    task=tuning_noact \
+    note=tuning_noact \
+    pretrained_ckpt_path=weights/sft_noact.pth \
diff --git a/models/LEO/trainer/__init__.py b/models/LEO/trainer/__init__.py
new file mode 100755
index 0000000..bcc5ea1
--- /dev/null
+++ b/models/LEO/trainer/__init__.py
@@ -0,0 +1,2 @@
+from .leo_scaler import LeoScaler
+from .leo_trainer import LeoTrainer
diff --git a/models/LEO/trainer/build.py b/models/LEO/trainer/build.py
new file mode 100755
index 0000000..326d170
--- /dev/null
+++ b/models/LEO/trainer/build.py
@@ -0,0 +1,84 @@
+import copy as cp
+import math
+import os
+from datetime import datetime
+
+import torch.optim as optim
+import wandb
+from common.type_utils import cfg2dict
+from fvcore.common.registry import Registry
+from torch.optim.lr_scheduler import LambdaLR
+
+TRAINER_REGISTRY = Registry('Trainer')
+
+
+class Tracker():
+
+    def __init__(self, cfg):
+        self.reset(cfg)
+
+    def step(self):
+        self.epoch += 1
+        self.loader_step = 0
+
+    def step_loader(self):
+        self.loader_step += 1
+
+    def reset(self, cfg):
+        self.exp_name = f"{cfg.note}-{datetime.now().strftime('%Y-%m-%d-%H:%M:%S')}"
+        self.run_id = wandb.util.generate_id()
+        self.epoch = 0
+        self.loader_step = 0
+        self.overall_best_result = 0
+
+    def state_dict(self):
+        return {
+            'run_id': self.run_id,
+            'epoch': self.epoch,
+            'exp_name': self.exp_name,
+            'loader_step': self.loader_step,
+            'overall_best_result': self.overall_best_result,
+        }
+
+    def load_state_dict(self, state_dict):
+        state_dict = cp.deepcopy(state_dict)
+        self.run_id = state_dict['run_id']
+        self.epoch = state_dict['epoch']
+        self.loader_step = state_dict['loader_step']
+        self.exp_name = state_dict['exp_name']
+        self.overall_best_result = state_dict['overall_best_result']
+
+
+def linear_warmup_cosine_decay(step, warmup_step, total_step):
+    if step <= warmup_step:
+        return 1e-3 + step / warmup_step * (1 - 1e-3)
+    return max(
+        0.5 * (1 + math.cos(
+            (step - warmup_step) / (total_step - warmup_step) * math.pi)),
+        1e-5)
+
+
+def get_scheduler(cfg, optimizer, total_steps):
+    lambda_func = lambda step: globals()[cfg.training.schedule.name](
+        step, cfg.training.schedule.args.warmup_steps, total_steps)
+    return LambdaLR(optimizer=optimizer, lr_lambda=lambda_func)
+
+
+def build_optim(cfg, params, total_steps):
+    optimizer = getattr(optim, cfg.training.optim.name)(params, **cfg2dict(
+        cfg.training.optim.args))
+    scheduler = get_scheduler(cfg, optimizer, total_steps)
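+    # Illustrative numbers: with warmup_steps=400 and total_steps=4000, the
+    # LR multiplier from linear_warmup_cosine_decay ramps from 1e-3 to 1.0
+    # over the first 400 steps, then follows a half cosine down to a 1e-5 floor.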
return optimizer, scheduler + + +def latest_checkpoint(path): + if not os.path.exists(path): + return '' + checkpoints = [os.path.join(path, f) for f in os.listdir(path)] + if len(checkpoints) == 0: + return '' + return max(checkpoints, key=os.path.getmtime) + + +def build_trainer(cfg): + return TRAINER_REGISTRY.get(cfg.trainer)(cfg) diff --git a/models/LEO/trainer/leo_scaler.py b/models/LEO/trainer/leo_scaler.py new file mode 100644 index 0000000..ce23c71 --- /dev/null +++ b/models/LEO/trainer/leo_scaler.py @@ -0,0 +1,265 @@ +import os +import random +from collections import defaultdict +from datetime import timedelta +from math import ceil + +import torch +from accelerate import DistributedDataParallelKwargs +from accelerate.logging import get_logger +from accelerate.utils import (InitProcessGroupKwargs, ProjectConfiguration, + set_seed) +from common.misc import CustomAccelerator, default_collate, split_train_set +from data.build import build_dataloader_leo, get_dataset_leo +from model.leo_agent import LeoAgent +from omegaconf import OmegaConf +from torch.utils.data import DataLoader +from tqdm import trange +from trainer.build import (TRAINER_REGISTRY, Tracker, build_optim, + latest_checkpoint) +from trainer.leo_trainer import LeoTrainer + +logger = get_logger(__name__) + + +@TRAINER_REGISTRY.register() +class LeoScaler(LeoTrainer): + + def __init__(self, cfg): + set_seed(cfg.rng_seed) + self.exp_dir = cfg.exp_dir + self.epochs = cfg.training.epochs + + # initialize accelerator + ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) + init_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800)) + kwargs = ([ddp_kwargs] if cfg.num_gpu > 1 else []) + [init_kwargs] + gradient_accumulation_steps = cfg.training.get( + 'gradient_accumulation_steps', 1) + + self.accelerator = CustomAccelerator( + project_config=ProjectConfiguration( + project_dir=self.exp_dir, + automatic_checkpoint_naming=True, + total_limit=1, + ), + gradient_accumulation_steps=gradient_accumulation_steps, + log_with=cfg.logger.name, + kwargs_handlers=kwargs) + + # dataset, dataloader, evaluator + self.eai_task_sources = ['hm3d', 'mp3d', 'cliport'] + self.data_loaders = {'train': []} # list of subsets + train_set = get_dataset_leo( + cfg=cfg, + split='train', + dataset_name=cfg.task.leomix.dataset, + dataset_wrapper_name=cfg.task.leomix.dataset_wrapper, + dataset_wrapper_args=cfg.task.leomix.dataset_wrapper_args) + train_subsets = split_train_set(train_set, self.epochs) + for train_subset in train_subsets: + self.data_loaders['train'].append( + DataLoader( + train_subset, + batch_size=cfg.dataloader.train.batchsize, + num_workers=cfg.dataloader.train.num_workers, + collate_fn=getattr(train_subset, 'collate_fn', + default_collate), + pin_memory=True, + shuffle=True, + drop_last=True, + )) + + self.data_loaders['val'] = build_dataloader_leo( + cfg=cfg, + split='test', + dataset_name=cfg.task.leomix.dataset, + dataset_wrapper_name=cfg.task.leomix.dataset_wrapper, + dataset_wrapper_args=cfg.task.leomix.dataset_wrapper_args, + dataloader_args=cfg.task.leomix.eval_dataloader_args, + ) + + # prepare dataloaders + self.data_loaders['train'] = [ + self.accelerator.prepare(sub_loader) + for sub_loader in self.data_loaders['train'] + ] + self.data_loaders['val'] = self.accelerator.prepare( + self.data_loaders['val']) + + # build model + self.model = LeoAgent(cfg) + learnable_named_params = self.model.get_learnable_named_params() + self.accelerator.learn_params_list = list( + learnable_named_params.keys()) + 
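+        # Recording the learnable parameter names on the accelerator lets it
+        # filter frozen weights out of saved checkpoints (see the 'automatically
+        # filter non-learnable params' note in LeoTrainer.save).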
optim_params = list(learnable_named_params.values()) + + # prepare model, optimizer and scheduler + total_steps = sum([ + ceil(len(sub_loader) / gradient_accumulation_steps) + for sub_loader in self.data_loaders['train'] + ]) + self.optimizer, self.scheduler = build_optim(cfg, + optim_params, + total_steps=total_steps) + self.model, self.optimizer, self.scheduler = self.accelerator.prepare( + self.model, self.optimizer, self.scheduler) + + self.exp_tracker = Tracker(cfg) + self.accelerator.register_for_checkpointing(self.exp_tracker) + + # load checkpoints + resume_ckpt = latest_checkpoint( + os.path.join(self.exp_dir, 'checkpoints')) + + if resume_ckpt: + load_model_only = False + self.pretrained_ckpt_path = resume_ckpt + logger.info( + f'Train: resume and load state from {self.pretrained_ckpt_path}' + ) + elif cfg.pretrained_ckpt_path and os.path.exists( + cfg.pretrained_ckpt_path): + load_model_only = True + self.pretrained_ckpt_path = cfg.pretrained_ckpt_path + logger.info( + f'Train: start and load model from {self.pretrained_ckpt_path}' + ) + else: + self.pretrained_ckpt_path = None + logger.info('Train: start from scratch') + + if self.pretrained_ckpt_path is not None: + self.load(path=self.pretrained_ckpt_path, + model_only=load_model_only) + + # misc + self.grad_norm = cfg.training.grad_norm + + self.accelerator.init_trackers( + project_name=cfg.name, + config=OmegaConf.to_container(cfg, + resolve=True, + throw_on_missing=True), + init_kwargs={ + 'wandb': { + 'name': self.exp_tracker.exp_name, + 'entity': cfg.logger.entity, + 'id': self.exp_tracker.run_id, + 'resume': True + } + }) + + def train_step(self, epoch): + logger.info(f'Start training epoch {epoch+1}') + self.model.train() + loader = self.data_loaders['train'][ + epoch] # the only difference to LeoTrainer.train_step() + pbar = trange(len(loader), + disable=(not self.accelerator.is_main_process)) + + if self.exp_tracker.loader_step > 0: + logger.info( + f'Skip the first {self.exp_tracker.loader_step} batches') + loader = self.accelerator.skip_first_batches( + loader, self.exp_tracker.loader_step) + pbar.update(self.exp_tracker.loader_step) + + for data_dict in loader: + with self.accelerator.accumulate(self.model): + # categorize tasks + is_txt_data = [(s not in self.eai_task_sources) + for s in data_dict['source']] + is_eai_data = [(s in self.eai_task_sources) + for s in data_dict['source']] + + # forward + data_dict = self.forward(data_dict, inference=False) + + # calculate loss and optimize + loss = data_dict['loss'] + loss_all = loss.mean() + self.backward(loss_all) + + # record + loss_dict = {'overall': loss_all} + loss_txt = loss[is_txt_data] + loss_eai = loss[is_eai_data] + if len(loss_txt) > 0: + loss_dict.update({'txt': loss_txt.mean()}) + if len(loss_eai) > 0: + loss_dict.update({'eai': loss_eai.mean()}) + self.log(loss_dict, mode='train', task='loss') + self.exp_tracker.step_loader() + pbar.update(1) + + logger.info(f'Finish training epoch {epoch+1}') + + @torch.no_grad() + def val_step(self, epoch): + logger.info(f'Start validation epoch {epoch+1}') + self.model.eval() + loader = self.data_loaders['val'] + pbar = trange(len(loader), + disable=(not self.accelerator.is_main_process)) + all_losses = defaultdict(list) + for data_dict in loader: + # convert list to str for training forward + if not isinstance(data_dict['output_gt'][0], str): + data_dict['output_gt'] = [ + random.choice(answer_list) + for answer_list in data_dict['output_gt'] + ] + + # inference + data_dict = self.forward(data_dict, inference=False) 
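+            # Note: inference=False is intentional — the scaler validates with
+            # teacher-forcing loss (data_dict['loss']) rather than generated
+            # text, so the training-style forward is reused here.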
+ + # gather + data_dict_non_tensor = { + k: v + for k, v in data_dict.items() + if not isinstance(v, torch.Tensor) + } + data_dict_non_tensor = self.accelerator.gather_for_metrics( + data_dict_non_tensor) + data_dict = { + k: v + for k, v in data_dict.items() if isinstance(v, torch.Tensor) + } + data_dict = self.accelerator.gather_for_metrics(data_dict) + data_dict.update(data_dict_non_tensor) + + all_losses['overall'].append(data_dict['loss']) + + is_txt_data = [(s not in self.eai_task_sources) + for s in data_dict['source']] + is_eai_data = [(s in self.eai_task_sources) + for s in data_dict['source']] + loss_txt = data_dict['loss'][is_txt_data] + loss_eai = data_dict['loss'][is_eai_data] + if len(loss_txt) > 0: + all_losses['txt'].append(loss_txt) + if len(loss_eai) > 0: + all_losses['eai'].append(loss_eai) + + pbar.update(1) + + loss_dict = {} + for k, v in all_losses.items(): + loss_dict[k] = torch.cat(v).mean().item() + + self.log(loss_dict, mode='val', task='loss') + logger.info( + f'Finish validation epoch {epoch+1}, test set loss: {loss_dict}') + + def run(self): + start_epoch = self.exp_tracker.epoch + for epoch in range(start_epoch, self.epochs): + self.train_step(epoch) + self.exp_tracker.step() + self.save(model_only=False) # automatic checkpointing + self.accelerator.wait_for_everyone() + self.val_step(epoch) + + self.accelerator.wait_for_everyone() + self.accelerator.end_training() diff --git a/models/LEO/trainer/leo_trainer.py b/models/LEO/trainer/leo_trainer.py new file mode 100644 index 0000000..d7f9fbc --- /dev/null +++ b/models/LEO/trainer/leo_trainer.py @@ -0,0 +1,409 @@ +import json +import os +from datetime import timedelta +from math import ceil + +import torch +import torch.nn as nn +from accelerate import DistributedDataParallelKwargs +from accelerate.logging import get_logger +from accelerate.utils import (InitProcessGroupKwargs, ProjectConfiguration, + set_seed) +from common.io_utils import make_dir +from common.misc import CustomAccelerator +from data.build import build_dataloader_leo +from evaluator.build import build_eval_leo +from model.leo_agent import LeoAgent +from omegaconf import OmegaConf +from tqdm import trange +from trainer.build import (TRAINER_REGISTRY, Tracker, build_optim, + latest_checkpoint) + +logger = get_logger(__name__) + +model_parallel_classes = ( + nn.parallel.DistributedDataParallel, + nn.DataParallel, +) + + +@TRAINER_REGISTRY.register() +class LeoTrainer(): + + def __init__(self, cfg): + set_seed(cfg.rng_seed) + self.exp_dir = cfg.exp_dir + self.mode = cfg.mode + + # initialize accelerator + ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True) + init_kwargs = InitProcessGroupKwargs(timeout=timedelta(seconds=800)) + kwargs = ([ddp_kwargs] if cfg.num_gpu > 1 else []) + [init_kwargs] + gradient_accumulation_steps = cfg.training.get( + 'gradient_accumulation_steps', 1) + + self.accelerator = CustomAccelerator( + project_config=ProjectConfiguration( + project_dir=self.exp_dir, + automatic_checkpoint_naming=True, + total_limit=1, + ), + gradient_accumulation_steps=gradient_accumulation_steps, + log_with=cfg.logger.name, + kwargs_handlers=kwargs) + + # dataset, dataloader, evaluator + self.eai_task_sources = ['hm3d', 'mp3d', 'cliport'] + self.data_loaders = {'train': {}, 'val': {}, 'test': {}} + self.evaluators = {} + self.eval_metrics = {} + for task_name in cfg.task.keys(): + if cfg.task[task_name] and 'dataset' in cfg.task[task_name]: + for mode in cfg.task[task_name].mode: + self.data_loaders[mode][task_name] = 
build_dataloader_leo( + cfg=cfg, + split=mode, + dataset_name=cfg.task[task_name].dataset, + dataset_wrapper_name=cfg.task[task_name]. + dataset_wrapper, + dataset_wrapper_args=cfg.task[task_name]. + dataset_wrapper_args, + dataloader_args=cfg.task[task_name]. + train_dataloader_args if mode == 'train' else + cfg.task[task_name].eval_dataloader_args, + ) + if 'evaluator' in cfg.task[task_name]: + self.evaluators[task_name] = build_eval_leo( + cfg, task_name, cfg.task[task_name].evaluator) + self.eval_metrics[task_name] = 0 + + assert len(self.data_loaders['train'] + ) <= 1, 'LEO requires only one training set' + + # prepare dataloaders + all_loaders, all_loader_keys = [], [] + for mode, loaders in self.data_loaders.items(): + for task, loader in loaders.items(): + all_loader_keys.append((mode, task)) + all_loaders.append(loader) + accelerate_loaders = self.accelerator.prepare(*all_loaders) + for k, v in zip(all_loader_keys, accelerate_loaders): + self.data_loaders[k[0]][k[1]] = v + + # build model + self.model = LeoAgent(cfg) + learnable_named_params = self.model.get_learnable_named_params() + self.accelerator.learn_params_list = list( + learnable_named_params.keys()) + optim_params = list(learnable_named_params.values()) + + # prepare model, optimizer and scheduler + total_steps = ceil( + len(list(self.data_loaders['train'].values())[0]) / + gradient_accumulation_steps) * cfg.task.training.epochs + self.optimizer, self.scheduler = build_optim(cfg, + optim_params, + total_steps=total_steps) + self.model, self.optimizer, self.scheduler = self.accelerator.prepare( + self.model, self.optimizer, self.scheduler) + + self.exp_tracker = Tracker(cfg) + self.accelerator.register_for_checkpointing(self.exp_tracker) + + # load checkpoints + resume_ckpt = latest_checkpoint( + os.path.join(self.exp_dir, 'checkpoints')) + self_best_ckpt = os.path.join(self.exp_dir, 'best.pth') + print(self.mode) + if self.mode == 'train': + if resume_ckpt: + load_model_only = False + self.pretrained_ckpt_path = resume_ckpt + logger.info( + f'Train: resume and load state from {self.pretrained_ckpt_path}' + ) + elif cfg.pretrained_ckpt_path and os.path.exists( + cfg.pretrained_ckpt_path): + load_model_only = True + self.pretrained_ckpt_path = cfg.pretrained_ckpt_path + logger.info( + f'Train: start and load model from {self.pretrained_ckpt_path}' + ) + else: + self.pretrained_ckpt_path = None + logger.info('Train: start from scratch') + else: + if os.path.exists(self_best_ckpt): + self.pretrained_ckpt_path = self_best_ckpt + elif cfg.pretrained_ckpt_path and os.path.exists( + cfg.pretrained_ckpt_path): + self.pretrained_ckpt_path = cfg.pretrained_ckpt_path + else: + raise ValueError('No checkpoint to load for evaluation') + load_model_only = True + logger.info(f'Eval: load model from {self.pretrained_ckpt_path}') + + if self.pretrained_ckpt_path is not None: + self.load(path=self.pretrained_ckpt_path, + model_only=load_model_only) + + # misc + self.epochs = cfg.training.epochs + self.grad_norm = cfg.training.grad_norm + self.val_interval = cfg.eval.val_interval + self.num_batch_val = cfg.eval.num_batch_val + + self.accelerator.init_trackers( + project_name=cfg.name, + config=OmegaConf.to_container(cfg, + resolve=True, + throw_on_missing=True), + init_kwargs={ + 'wandb': { + 'name': self.exp_tracker.exp_name, + 'entity': cfg.logger.entity, + 'id': self.exp_tracker.run_id, + 'resume': True + } + }) + + def forward(self, data_dict, inference=False): + if inference: + if isinstance(self.model, model_parallel_classes): + 
return self.model.module.generate(data_dict) + else: + return self.model.generate(data_dict) + else: + return self.model(data_dict) + + def backward(self, loss): + self.optimizer.zero_grad() + self.accelerator.backward(loss) + if self.grad_norm is not None and self.accelerator.sync_gradients: + self.accelerator.clip_grad_norm_(self.model.parameters(), + self.grad_norm) + self.optimizer.step() + self.scheduler.step() + + def train_step(self, epoch): + logger.info(f'Start training epoch {epoch+1}') + self.model.train() + loader = list(self.data_loaders['train'].values())[0] + pbar = trange(len(loader), + disable=(not self.accelerator.is_main_process)) + + if self.exp_tracker.loader_step > 0: + logger.info( + f'Skip the first {self.exp_tracker.loader_step} batches') + loader = self.accelerator.skip_first_batches( + loader, self.exp_tracker.loader_step) + pbar.update(self.exp_tracker.loader_step) + + for data_dict in loader: + with self.accelerator.accumulate(self.model): + # categorize tasks + is_txt_data = [(s not in self.eai_task_sources) + for s in data_dict['source']] + is_eai_data = [(s in self.eai_task_sources) + for s in data_dict['source']] + + # forward + data_dict = self.forward(data_dict, inference=False) + + # calculate loss and optimize + loss = data_dict['loss'] + loss_all = loss.mean() + self.backward(loss_all) + + # record + loss_dict = {'overall': loss_all} + loss_txt = loss[is_txt_data] + loss_eai = loss[is_eai_data] + if len(loss_txt) > 0: + loss_dict.update({'txt': loss_txt.mean()}) + if len(loss_eai) > 0: + loss_dict.update({'eai': loss_eai.mean()}) + self.log(loss_dict, mode='train', task='loss') + self.exp_tracker.step_loader() + pbar.update(1) + + logger.info(f'Finish training epoch {epoch+1}') + + @torch.no_grad() + def val_step(self, epoch, full_val=False): + logger.info(f'Start validation epoch {epoch+1}') + self.model.eval() + for task_name in self.evaluators.keys(): + if task_name in self.data_loaders['val']: + loader = self.data_loaders['val'][task_name] + pbar = trange(len(loader), + disable=(not self.accelerator.is_main_process)) + for i, data_dict in enumerate(loader): + + # inference + data_dict = self.forward(data_dict, inference=True) + + # gather + data_dict_non_tensor = { + k: v + for k, v in data_dict.items() + if not isinstance(v, torch.Tensor) + } + data_dict_non_tensor = self.accelerator.gather_for_metrics( + data_dict_non_tensor) + data_dict = { + k: v + for k, v in data_dict.items() + if isinstance(v, torch.Tensor) + } + data_dict = self.accelerator.gather_for_metrics(data_dict) + data_dict.update(data_dict_non_tensor) + + self.evaluators[task_name].update(data_dict) + pbar.update(1) + + _, results = self.evaluators[task_name].record( + split='val', + is_main_process=self.accelerator.is_main_process) + + self.eval_metrics[task_name] = results['target_metric'] + self.log(results, mode='val', task=task_name) + logger.info(f'{task_name}: {results}') + self.evaluators[task_name].reset() + + # simply summing up + overall_avg_metrics = sum(list(self.eval_metrics.values())) / len( + self.eval_metrics) + self.log({'avg_metrics': overall_avg_metrics}, + mode='val', + task='overall') + if overall_avg_metrics > self.exp_tracker.overall_best_result: + is_best = True + self.exp_tracker.overall_best_result = overall_avg_metrics + else: + is_best = False + logger.info(f'Finish validation epoch {epoch+1}, is_best = {is_best}') + return is_best + + @torch.no_grad() + def test_step(self): + logger.info('Start final testing') + self.model.eval() + for task_name in 
self.evaluators.keys(): + if task_name in self.data_loaders['test']: + loader = self.data_loaders['test'][task_name] + pbar = trange(len(loader), + disable=(not self.accelerator.is_main_process)) + for idx, data_dict in enumerate(loader): + data_dict = self.forward(data_dict, inference=True) + + data_dict_non_tensor = { + k: v + for k, v in data_dict.items() + if not isinstance(v, torch.Tensor) + } + data_dict_non_tensor = self.accelerator.gather_for_metrics( + data_dict_non_tensor) + data_dict = { + k: v + for k, v in data_dict.items() + if isinstance(v, torch.Tensor) + } + data_dict = self.accelerator.gather_for_metrics(data_dict) + data_dict.update(data_dict_non_tensor) + + self.evaluators[task_name].update(data_dict) + pbar.update(1) + + if idx % 500 == 0: + json.dump( + self.evaluators[task_name].save_results, + open( + os.path.join(self.exp_dir, + f'test_{task_name}_{idx}.json'), + 'w')) + json.dump( + self.evaluators[task_name].save_results, + open( + os.path.join(self.exp_dir, + f'test_{task_name}_complete.json'), 'w')) + + _, results = self.evaluators[task_name].record( + split='test', + is_main_process=self.accelerator.is_main_process) + + self.log(results, mode='test', task=task_name) + logger.info(f'{task_name}: {results}') + self.evaluators[task_name].reset() + + logger.info('Finish testing') + + def log(self, results, mode='train', task='default'): + log_dict = {} + for key, val in results.items(): + log_dict[f'{mode}/{task}/{key}'] = val + + if mode == 'train': + lrs = self.scheduler.get_lr() + for i, lr in enumerate(lrs): + log_dict[f'train/lr/group_{i}'] = lr + + self.accelerator.log(log_dict) + + def save(self, name='best.pth', model_only=False): + if model_only: + path = os.path.join(self.exp_dir, name) + make_dir(path) + model_state_dict = self.accelerator.get_state_dict(self.model) + # automatically filter non-learnable params, and save on main_process + self.accelerator.save(model_state_dict, + os.path.join(path, 'pytorch_model.bin')) + else: + self.accelerator.save_state( + ) # automatic_checkpoint_naming = True -> self.exp_dir / checkpoints + + def load(self, path, model_only=False): + if model_only: + if os.path.exists(os.path.join(path, 'pytorch_model.bin')): + model_state_dict = torch.load( + os.path.join(path, 'pytorch_model.bin')) + else: + model_state_dict = torch.load(path) + if isinstance(self.model, model_parallel_classes): + self.model.module.load_state_dict(model_state_dict, + strict=False) + else: + self.model.load_state_dict(model_state_dict, strict=False) + else: + # resume training + self.accelerator.load_state(path, strict=False) + self.accelerator.project_configuration.iteration = int( + str(path)[-1]) + 1 + logger.info( + f'Successfully loaded from {str(path)}, load_model_only = {model_only}' + ) + + def run(self): + if self.mode == 'train': + start_epoch = self.exp_tracker.epoch + for epoch in range(start_epoch, self.epochs): + + self.train_step(epoch) + # if (epoch + 1) % self.val_interval == 0: + # is_best = self.val_step(epoch) + + # if is_best: + self.save('model_last.pth', model_only=True) + self.accelerator.wait_for_everyone() + + self.exp_tracker.step() + self.save(model_only=False) # automatic checkpointing + self.accelerator.wait_for_everyone() + + # load best checkpoint for test + logger.info('Training finished, load best checkpoint for testing') + self.load(os.path.join(self.exp_dir, 'best.pth'), model_only=True) + + self.test_step() + self.accelerator.wait_for_everyone() + self.accelerator.end_training() diff --git 
a/models/LL3DA/LICENSE b/models/LL3DA/LICENSE
new file mode 100644
index 0000000..a6088da
--- /dev/null
+++ b/models/LL3DA/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Sijin Chen
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/models/LL3DA/data/scannet/README.md b/models/LL3DA/data/scannet/README.md
new file mode 100644
index 0000000..c9ebad9
--- /dev/null
+++ b/models/LL3DA/data/scannet/README.md
@@ -0,0 +1,12 @@
+# ScanNet Instructions
+
+To acquire access to the ScanNet dataset, please refer to the [ScanNet project page](https://github.com/ScanNet/ScanNet) and follow the instructions there. You will receive a `download-scannet.py` script once your request for the ScanNet dataset is approved. Note that only a subset of ScanNet is needed. Once you have `download-scannet.py`, please use the commands below to download the portion of ScanNet that is necessary for ScanRefer:
+
+```shell
+python2 download-scannet.py -o data/scannet --type _vh_clean_2.ply
+python2 download-scannet.py -o data/scannet --type .aggregation.json
+python2 download-scannet.py -o data/scannet --type _vh_clean_2.0.010000.segs.json
+python2 download-scannet.py -o data/scannet --type .txt
+```
+
+Roughly 10.6 GB of free disk space is required.
diff --git a/models/LL3DA/data/scannet/batch_load_scannet_data.py b/models/LL3DA/data/scannet/batch_load_scannet_data.py
new file mode 100644
index 0000000..8d60978
--- /dev/null
+++ b/models/LL3DA/data/scannet/batch_load_scannet_data.py
@@ -0,0 +1,100 @@
+"""Modified from: https://github.com/facebookresearch/votenet/blob/master/scannet/batch_load_scannet_data.py.
+ +Batch mode in loading Scannet scenes with vertices and ground truth labels for semantic and instance segmentations + +Usage example: python ./batch_load_scannet_data.py +""" + +import datetime +import os +import pdb +import sys + +import numpy as np +from load_scannet_data import export + +SCANNET_DIR = '.../scans' # TODO: change this +# SCANNET_DIR = '.../scans_test' # HACK: If you wish to upload your results, remember to process the test set +SCAN_NAMES = os.listdir(SCANNET_DIR) +LABEL_MAP_FILE = 'meta_data/scannetv2-labels.combined.tsv' +DONOTCARE_CLASS_IDS = np.array([]) +OBJ_CLASS_IDS = np.array([ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23, + 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40 +]) # exclude wall (1), floor (2), ceiling (22) +MAX_NUM_POINT = 50000 +OUTPUT_FOLDER = './scannet_data' + + +def export_one_scan(scan_name, output_filename_prefix): + mesh_file = os.path.join(SCANNET_DIR, scan_name, + scan_name + '_vh_clean_2.ply') + agg_file = os.path.join(SCANNET_DIR, scan_name, + scan_name + '.aggregation.json') + seg_file = os.path.join(SCANNET_DIR, scan_name, + scan_name + '_vh_clean_2.0.010000.segs.json') + meta_file = os.path.join( + SCANNET_DIR, scan_name, scan_name + + '.txt') # includes axisAlignment info for the train set scans. + mesh_vertices, aligned_vertices, semantic_labels, instance_labels, instance_bboxes, aligned_instance_bboxes = export( + mesh_file, agg_file, seg_file, meta_file, LABEL_MAP_FILE, None) + + mask = np.logical_not(np.in1d(semantic_labels, DONOTCARE_CLASS_IDS)) + mesh_vertices = mesh_vertices[mask, :] + aligned_vertices = aligned_vertices[mask, :] + semantic_labels = semantic_labels[mask] + instance_labels = instance_labels[mask] + + if instance_bboxes.shape[0] > 1: + num_instances = len(np.unique(instance_labels)) + print('Num of instances: ', num_instances) + + # bbox_mask = np.in1d(instance_bboxes[:,-1], OBJ_CLASS_IDS) + bbox_mask = np.in1d(instance_bboxes[:, -2], + OBJ_CLASS_IDS) # match the mesh2cap + instance_bboxes = instance_bboxes[bbox_mask, :] + aligned_instance_bboxes = aligned_instance_bboxes[bbox_mask, :] + print('Num of care instances: ', instance_bboxes.shape[0]) + else: + print('No semantic/instance annotation for test scenes') + + N = mesh_vertices.shape[0] + if N > MAX_NUM_POINT: + choices = np.random.choice(N, MAX_NUM_POINT, replace=False) + mesh_vertices = mesh_vertices[choices, :] + aligned_vertices = aligned_vertices[choices, :] + semantic_labels = semantic_labels[choices] + instance_labels = instance_labels[choices] + + print('Shape of points: {}'.format(mesh_vertices.shape)) + + np.save(output_filename_prefix + '_vert.npy', mesh_vertices) + np.save(output_filename_prefix + '_aligned_vert.npy', aligned_vertices) + np.save(output_filename_prefix + '_sem_label.npy', semantic_labels) + np.save(output_filename_prefix + '_ins_label.npy', instance_labels) + np.save(output_filename_prefix + '_bbox.npy', instance_bboxes) + np.save(output_filename_prefix + '_aligned_bbox.npy', + aligned_instance_bboxes) + + +def batch_export(): + if not os.path.exists(OUTPUT_FOLDER): + print('Creating new data folder: {}'.format(OUTPUT_FOLDER)) + os.mkdir(OUTPUT_FOLDER) + + for scan_name in SCAN_NAMES: + output_filename_prefix = os.path.join(OUTPUT_FOLDER, scan_name) + # if os.path.exists(output_filename_prefix + '_vert.npy'): continue + + print('-' * 20 + 'begin') + print(datetime.datetime.now()) + print(scan_name) + + export_one_scan(scan_name, output_filename_prefix) + + print('-' * 20 + 'done') + 
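+    # Each exported scan yields six .npy files in OUTPUT_FOLDER:
+    # *_vert, *_aligned_vert, *_sem_label, *_ins_label, *_bbox, *_aligned_bbox.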
+ +if __name__ == '__main__': + batch_export() diff --git a/models/LL3DA/data/scannet/load_scannet_data.py b/models/LL3DA/data/scannet/load_scannet_data.py new file mode 100644 index 0000000..f4a5722 --- /dev/null +++ b/models/LL3DA/data/scannet/load_scannet_data.py @@ -0,0 +1,207 @@ +"""Modified from: https://github.com/facebookresearch/votenet/blob/master/scann +et/load_scannet_data.py. + +Load Scannet scenes with vertices and ground truth labels for semantic and +instance segmentations +""" + +import argparse +import inspect +import json +# python imports +import math +import os +import pdb +import sys + +import numpy as np +import scannet_utils + + +def read_aggregation(filename): + object_id_to_segs = {} + label_to_segs = {} + with open(filename) as f: + data = json.load(f) + num_objects = len(data['segGroups']) + for i in range(num_objects): + object_id = data['segGroups'][i][ + 'objectId'] + 1 # instance ids should be 1-indexed + label = data['segGroups'][i]['label'] + segs = data['segGroups'][i]['segments'] + object_id_to_segs[object_id] = segs + if label in label_to_segs: + label_to_segs[label].extend(segs) + else: + label_to_segs[label] = segs + return object_id_to_segs, label_to_segs + + +def read_segmentation(filename): + seg_to_verts = {} + with open(filename) as f: + data = json.load(f) + num_verts = len(data['segIndices']) + for i in range(num_verts): + seg_id = data['segIndices'][i] + if seg_id in seg_to_verts: + seg_to_verts[seg_id].append(i) + else: + seg_to_verts[seg_id] = [i] + return seg_to_verts, num_verts + + +def export(mesh_file, + agg_file, + seg_file, + meta_file, + label_map_file, + output_file=None): + """points are XYZ RGB (RGB in 0-255), semantic label as nyu40 ids, instance + label as 1-#instance, box as (cx,cy,cz,dx,dy,dz,semantic_label)""" + label_map = scannet_utils.read_label_mapping(label_map_file, + label_from='raw_category', + label_to='nyu40id') + # mesh_vertices = scannet_utils.read_mesh_vertices_rgb(mesh_file) + mesh_vertices = scannet_utils.read_mesh_vertices_rgb_normal(mesh_file) + + # Load scene axis alignment matrix + lines = open(meta_file).readlines() + axis_align_matrix = None + for line in lines: + if 'axisAlignment' in line: + axis_align_matrix = [ + float(x) + for x in line.rstrip().strip('axisAlignment = ').split(' ') + ] + + if axis_align_matrix != None: + axis_align_matrix = np.array(axis_align_matrix).reshape((4, 4)) + pts = np.ones((mesh_vertices.shape[0], 4)) + pts[:, 0:3] = mesh_vertices[:, 0:3] + pts = np.dot(pts, axis_align_matrix.transpose()) # Nx4 + aligned_vertices = np.copy(mesh_vertices) + aligned_vertices[:, 0:3] = pts[:, 0:3] + else: + print('No axis alignment matrix found') + aligned_vertices = mesh_vertices + + # Load semantic and instance labels + if os.path.isfile(agg_file): + object_id_to_segs, label_to_segs = read_aggregation(agg_file) + seg_to_verts, num_verts = read_segmentation(seg_file) + + label_ids = np.zeros(shape=(num_verts), + dtype=np.uint32) # 0: unannotated + object_id_to_label_id = {} + for label, segs in label_to_segs.items(): + label_id = label_map[label] + for seg in segs: + verts = seg_to_verts[seg] + label_ids[verts] = label_id + instance_ids = np.zeros(shape=(num_verts), + dtype=np.uint32) # 0: unannotated + num_instances = len(np.unique(list(object_id_to_segs.keys()))) + for object_id, segs in object_id_to_segs.items(): + for seg in segs: + verts = seg_to_verts[seg] + instance_ids[verts] = object_id + if object_id not in object_id_to_label_id: + object_id_to_label_id[object_id] = 
label_ids[verts][0] + + instance_bboxes = np.zeros( + (num_instances, 8)) # also include object id + aligned_instance_bboxes = np.zeros( + (num_instances, 8)) # also include object id + for obj_id in object_id_to_segs: + label_id = object_id_to_label_id[obj_id] + + # bboxes in the original meshes + obj_pc = mesh_vertices[instance_ids == obj_id, 0:3] + if len(obj_pc) == 0: continue + # Compute axis aligned box + # An axis aligned bounding box is parameterized by + # (cx,cy,cz) and (dx,dy,dz) and label id + # where (cx,cy,cz) is the center point of the box, + # dx is the x-axis length of the box. + xmin = np.min(obj_pc[:, 0]) + ymin = np.min(obj_pc[:, 1]) + zmin = np.min(obj_pc[:, 2]) + xmax = np.max(obj_pc[:, 0]) + ymax = np.max(obj_pc[:, 1]) + zmax = np.max(obj_pc[:, 2]) + bbox = np.array([(xmin + xmax) / 2, (ymin + ymax) / 2, + (zmin + zmax) / 2, xmax - xmin, ymax - ymin, + zmax - zmin, label_id, + obj_id - 1]) # also include object id + # NOTE: this assumes obj_id is in 1,2,3,.,,,.NUM_INSTANCES + instance_bboxes[obj_id - 1, :] = bbox + + # bboxes in the aligned meshes + obj_pc = aligned_vertices[instance_ids == obj_id, 0:3] + if len(obj_pc) == 0: continue + # Compute axis aligned box + # An axis aligned bounding box is parameterized by + # (cx,cy,cz) and (dx,dy,dz) and label id + # where (cx,cy,cz) is the center point of the box, + # dx is the x-axis length of the box. + xmin = np.min(obj_pc[:, 0]) + ymin = np.min(obj_pc[:, 1]) + zmin = np.min(obj_pc[:, 2]) + xmax = np.max(obj_pc[:, 0]) + ymax = np.max(obj_pc[:, 1]) + zmax = np.max(obj_pc[:, 2]) + bbox = np.array([(xmin + xmax) / 2, (ymin + ymax) / 2, + (zmin + zmax) / 2, xmax - xmin, ymax - ymin, + zmax - zmin, label_id, + obj_id - 1]) # also include object id + # NOTE: this assumes obj_id is in 1,2,3,.,,,.NUM_INSTANCES + aligned_instance_bboxes[obj_id - 1, :] = bbox + else: + # use zero as placeholders for the test scene + print('use placeholders') + num_verts = mesh_vertices.shape[0] + label_ids = np.zeros(shape=(num_verts), + dtype=np.uint32) # 0: unannotated + instance_ids = np.zeros(shape=(num_verts), + dtype=np.uint32) # 0: unannotated + instance_bboxes = np.zeros((1, 8)) # also include object id + aligned_instance_bboxes = np.zeros((1, 8)) # also include object id + + if output_file is not None: + np.save(output_file + '_vert.npy', mesh_vertices) + np.save(output_file + '_aligned_vert.npy', aligned_vertices) + np.save(output_file + '_sem_label.npy', label_ids) + np.save(output_file + '_ins_label.npy', instance_ids) + np.save(output_file + '_bbox.npy', instance_bboxes) + np.save(output_file + '_aligned_bbox.npy', instance_bboxes) + + return mesh_vertices, aligned_vertices, label_ids, instance_ids, instance_bboxes, aligned_instance_bboxes + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + '--scan_path', + required=True, + help='path to scannet scene (e.g., data/ScanNet/v2/scene0000_00') + parser.add_argument('--output_file', required=True, help='output file') + parser.add_argument('--label_map_file', + required=True, + help='path to scannetv2-labels.combined.tsv') + opt = parser.parse_args() + + scan_name = os.path.split(opt.scan_path)[-1] + mesh_file = os.path.join(opt.scan_path, scan_name + '_vh_clean_2.ply') + agg_file = os.path.join(opt.scan_path, scan_name + '.aggregation.json') + seg_file = os.path.join(opt.scan_path, + scan_name + '_vh_clean_2.0.010000.segs.json') + meta_file = os.path.join( + opt.scan_path, scan_name + + '.txt') # includes axisAlignment info for the train set scans. 
+    export(mesh_file, agg_file, seg_file, meta_file, opt.label_map_file,
+           opt.output_file)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/models/LL3DA/data/scannet/meta_data/nyu40_labels.csv b/models/LL3DA/data/scannet/meta_data/nyu40_labels.csv
new file mode 100644
index 0000000..fc39f73
--- /dev/null
+++ b/models/LL3DA/data/scannet/meta_data/nyu40_labels.csv
@@ -0,0 +1,41 @@
+nyu40id,nyu40class,mappedId,mappedIdConsecutive,weight
+1,wall,(ignore),19,0.0
+2,floor,(ignore),19,0.0
+3,cabinet,3,1,3.9644974086960434
+4,bed,4,2,5.459494152836571
+5,chair,5,3,2.241522691584157
+6,sofa,6,4,4.820655512680854
+7,table,7,5,3.565918577548873
+8,door,8,6,3.538498341919445
+9,window,9,7,4.636521236560596
+10,bookshelf,10,8,5.445050937449535
+11,picture,11,9,5.079250281008131
+12,counter,12,10,6.2030429647735845
+13,blinds,(ignore),19,0.0
+14,desk,14,11,4.622662494840168
+15,shelves,(ignore),19,0.0
+16,curtain,16,12,5.956294301248057
+17,dresser,(ignore),19,0.0
+18,pillow,(ignore),19,0.0
+19,mirror,(ignore),19,0.0
+20,floor_mat,(ignore),19,0.0
+21,clothes,(ignore),19,0.0
+22,ceiling,(ignore),19,0.0
+23,books,(ignore),19,0.0
+24,refridgerator,24,13,5.459141107819665
+25,television,(ignore),19,0.0
+26,paper,(ignore),19,0.0
+27,towel,(ignore),19,0.0
+28,shower_curtain,28,14,6.724871661883906
+29,box,(ignore),19,0.0
+30,whiteboard,(ignore),19,0.0
+31,person,(ignore),19,0.0
+32,night_stand,(ignore),19,0.0
+33,toilet,33,15,5.832442848923174
+34,sink,34,16,5.064773947290611
+35,lamp,(ignore),19,0.0
+36,bathtub,36,17,6.738988357113375
+37,bag,(ignore),19,0.0
+38,otherstructure,(ignore),19,0.0
+39,otherfurniture,39,18,3.375217918833916
+40,otherprop,(ignore),19,0.0
diff --git a/models/LL3DA/data/scannet/meta_data/render_option.json b/models/LL3DA/data/scannet/meta_data/render_option.json
new file mode 100644
index 0000000..f547bab
--- /dev/null
+++ b/models/LL3DA/data/scannet/meta_data/render_option.json
@@ -0,0 +1,40 @@
+{
+    "background_color" : [ 1, 1, 1 ],
+    "class_name" : "RenderOption",
+    "default_mesh_color" : [ 0.69999999999999996, 0.69999999999999996, 0.69999999999999996 ],
+    "image_max_depth" : 3000,
+    "image_stretch_option" : 0,
+    "interpolation_option" : 0,
+    "light0_color" : [ 1, 1, 1 ],
+    "light0_diffuse_power" : 0.66000000000000003,
+    "light0_position" : [ 0, 0, 2 ],
+    "light0_specular_power" : 0.20000000000000001,
+    "light0_specular_shininess" : 100,
+    "light1_color" : [ 1, 1, 1 ],
+    "light1_diffuse_power" : 0.66000000000000003,
+    "light1_position" : [ 0, 0, 2 ],
+    "light1_specular_power" : 0.20000000000000001,
+    "light1_specular_shininess" : 100,
+    "light2_color" : [ 1, 1, 1 ],
+    "light2_diffuse_power" : 0.66000000000000003,
+    "light2_position" : [ 0, 0, -2 ],
+    "light2_specular_power" : 0.20000000000000001,
+    "light2_specular_shininess" : 100,
+    "light3_color" : [ 1, 1, 1 ],
+    "light3_diffuse_power" : 0.66000000000000003,
+    "light3_position" : [ 0, 0, -2 ],
+    "light3_specular_power" : 0.20000000000000001,
+    "light3_specular_shininess" : 100,
+    "light_ambient_color" : [ 0, 0, 0 ],
+    "light_on" : true,
+    "mesh_color_option" : 1,
+    "mesh_shade_option" : 0,
+    "mesh_show_back_face" : false,
+    "mesh_show_wireframe" : false,
+    "point_color_option" : 9,
+    "point_show_normal" : false,
+    "point_size" : 5,
+    "show_coordinate_frame" : false,
+    "version_major" : 1,
+    "version_minor" : 0
+}
diff --git a/models/LL3DA/data/scannet/meta_data/scannet_means.npz b/models/LL3DA/data/scannet/meta_data/scannet_means.npz
new file mode 100644
index 0000000..e57647c
Binary files /dev/null and
b/models/LL3DA/data/scannet/meta_data/scannet_means.npz differ diff --git a/models/LL3DA/data/scannet/meta_data/scannet_reference_means.npz b/models/LL3DA/data/scannet/meta_data/scannet_reference_means.npz new file mode 100644 index 0000000..75b9cf2 Binary files /dev/null and b/models/LL3DA/data/scannet/meta_data/scannet_reference_means.npz differ diff --git a/models/LL3DA/data/scannet/meta_data/scannetv2-labels.combined.tsv b/models/LL3DA/data/scannet/meta_data/scannetv2-labels.combined.tsv new file mode 100644 index 0000000..03ddbdc --- /dev/null +++ b/models/LL3DA/data/scannet/meta_data/scannetv2-labels.combined.tsv @@ -0,0 +1,608 @@ +id raw_category category count nyu40id eigen13id nyuClass nyu40class eigen13class ModelNet40 ModelNet10 ShapeNetCore55 synsetoffset wnsynsetid wnsynsetkey mpcat40 mpcat40index +1 wall wall 8277 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +2 chair chair 4646 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +22 books book 1678 23 2 book books Books n02870526 book.n.11 objects 39 +3 floor floor 1553 2 5 floor floor Floor n03365592 floor.n.01 floor 2 +5 door door 1483 8 12 door door Wall door n03221720 door.n.01 door 4 +1163 object object 1313 40 7 otherprop Objects objects 39 +16 window window 1209 9 13 window window Window n04587648 window.n.01 window 9 +4 table table 1170 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +56 trash can trash can 1090 39 6 garbage bin otherfurniture Furniture trash_bin 2747177 n02747177 ashcan.n.01 objects 39 +13 pillow pillow 937 18 7 pillow pillow Objects pillow 3938244 n03938244 pillow.n.01 cushion 8 +15 picture picture 862 11 8 picture picture Picture n03931044 picture.n.01 picture 6 +41 ceiling ceiling 806 22 3 ceiling ceiling Ceiling n02990373 ceiling.n.01 ceiling 17 +26 box box 775 29 7 box box Objects n02883344 box.n.01 objects 39 +161 doorframe doorframe 768 8 12 door door Wall door doorframe.n.01 door 4 +19 monitor monitor 765 40 7 monitor otherprop Objects monitor monitor tv or monitor 3211117 n03782190 monitor.n.04 objects 39 +7 cabinet cabinet 731 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +9 desk desk 680 14 10 desk desk Table desk desk table 4379243 n03179701 desk.n.01 table 5 +8 shelf shelf 641 15 6 shelves shelves Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +10 office chair office chair 595 5 4 chair chair Chair chair chair chair 3001627 n04373704 swivel_chair.n.01 chair 3 +31 towel towel 570 27 7 towel towel Objects n04459362 towel.n.01 towel 20 +6 couch couch 502 6 9 sofa sofa Sofa sofa sofa sofa 4256520 n04256520 sofa.n.01 sofa 10 +14 sink sink 488 34 7 sink sink Objects sink n04223580 sink.n.01 sink 15 +48 backpack backpack 479 40 7 backpack otherprop Objects n02769748 backpack.n.01 objects 39 +28 lamp lamp 419 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +11 bed bed 370 4 1 bed bed Bed bed bed bed 2818832 n02818832 bed.n.01 bed 11 +18 bookshelf bookshelf 360 10 6 bookshelf bookshelf Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +71 mirror mirror 349 19 7 mirror mirror Objects n03773035 mirror.n.01 mirror 21 +21 curtain curtain 347 16 13 curtain curtain Window curtain n03151077 curtain.n.01 curtain 12 +40 plant plant 331 40 7 plant otherprop Objects plant n00017222 plant.n.02 plant 14 +52 whiteboard whiteboard 327 30 7 whiteboard whiteboard Objects n03211616 display_panel.n.01 board_panel 35 +96 radiator radiator 322 39 6 
radiator otherfurniture Furniture n04041069 radiator.n.02 misc 40 +22 book book 318 23 2 book books Books n02870526 book.n.11 objects 39 +29 kitchen cabinet kitchen cabinet 310 3 6 cabinet cabinet Furniture n02933112 cabinet.n.01 cabinet 7 +49 toilet paper toilet paper 291 40 7 toilet paper otherprop Objects n15075141 toilet_tissue.n.01 objects 39 +29 kitchen cabinets kitchen cabinet 289 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +23 armchair armchair 281 5 4 chair chair Chair chair chair chair 3001627 n02738535 armchair.n.01 chair 3 +63 shoes shoe 272 40 7 shoe otherprop Objects n04199027 shoe.n.01 clothes 38 +24 coffee table coffee table 258 7 10 coffee table table Table table table table 4379243 n03063968 coffee_table.n.01 table 5 +17 toilet toilet 256 33 7 toilet toilet Objects toilet toilet n04446276 toilet.n.01 toilet 18 +47 bag bag 252 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +32 clothes clothes 248 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +46 keyboard keyboard 246 40 7 keyboard otherprop Objects keyboard computer keyboard 3085013 n03085013 computer_keyboard.n.01 objects 39 +65 bottle bottle 226 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +97 recycling bin recycling bin 225 39 6 garbage bin otherfurniture Furniture trash_bin 2747177 n02747177 ashcan.n.01 objects 39 +34 nightstand nightstand 224 32 6 night stand night stand Furniture night_stand night_stand n03015254 chest_of_drawers.n.01 chest_of_drawers 13 +38 stool stool 221 40 7 stool otherprop Objects stool n04326896 stool.n.01 stool 19 +33 tv tv 219 25 11 television television TV tv or monitor 3211117 n03211117 display.n.06 tv_monitor 22 +75 file cabinet file cabinet 217 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +36 dresser dresser 213 17 6 dresser dresser Furniture dresser dresser n03015254 chest_of_drawers.n.01 chest_of_drawers 13 +64 computer tower computer tower 203 40 7 computer otherprop Objects n03082979 computer.n.01 objects 39 +32 clothing clothes 165 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +101 telephone telephone 164 40 7 telephone otherprop Objects telephone 4401088 n04401088 telephone.n.01 objects 39 +130 cup cup 157 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 +27 refrigerator refrigerator 154 24 6 refrigerator refrigerator Furniture n04070727 refrigerator.n.01 appliances 37 +44 end table end table 147 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +131 jacket jacket 146 40 7 jacket otherprop Objects n03589791 jacket.n.01 clothes 38 +55 shower curtain shower curtain 144 28 7 shower curtain shower curtain Objects curtain n04209239 shower_curtain.n.01 curtain 12 +42 bathtub bathtub 144 36 7 bathtub bathtub Objects bathtub bathtub tub 2808440 n02808440 bathtub.n.01 bathtub 25 +59 microwave microwave 141 40 7 microwave otherprop Objects microwave 3761084 n03761084 microwave.n.02 appliances 37 +159 kitchen counter kitchen counter 140 12 6 counter counter Furniture table table table 4379243 n03116530 counter.n.01 counter 26 +74 sofa chair sofa chair 129 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +82 paper towel dispenser paper towel dispenser 129 40 7 paper towel dispenser otherprop Objects objects 39 +1164 bathroom vanity bathroom vanity 126 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 table 5 +93 suitcase suitcase 
118 40 7 luggage otherprop Objects n02773838 bag.n.06 objects 39 +77 laptop laptop 111 40 7 laptop otherprop Objects laptop laptop 3642806 n03642806 laptop.n.01 objects 39 +67 ottoman ottoman 111 39 6 ottoman otherfurniture Furniture stool n03380724 footstool.n.01 stool 19 +128 shower walls shower wall 109 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +50 printer printer 106 40 7 printer otherprop Objects printer 4004475 n04004475 printer.n.03 appliances 37 +35 counter counter 104 12 6 counter counter Furniture table table table 4379243 n03116530 counter.n.01 counter 26 +69 board board 100 38 7 board otherstructure Objects board_panel 35 +100 soap dispenser soap dispenser 99 40 7 otherprop Objects n04254120 soap_dispenser.n.01 objects 39 +62 stove stove 95 38 7 stove otherstructure Objects stove 4330267 n04330267 stove.n.02 appliances 37 +105 light light 93 38 7 light otherstructure Objects n03665366 light.n.02 lighting 28 +1165 closet wall closet wall 90 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +165 mini fridge mini fridge 87 24 6 refrigerator refrigerator Furniture n03273913 electric_refrigerator.n.01 appliances 37 +7 cabinets cabinet 79 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +5 doors door 76 8 12 door door Wall door n03221720 door.n.01 door 4 +76 fan fan 75 40 7 fan otherprop Objects n03320046 fan.n.01 misc 40 +230 tissue box tissue box 73 40 7 tissue box otherprop Objects n02883344 box.n.01 objects 39 +54 blanket blanket 72 40 7 blanket otherprop Objects n02849154 blanket.n.01 objects 39 +125 bathroom stall bathroom stall 71 38 7 otherstructure Objects n02873839 booth.n.02 misc 40 +72 copier copier 70 40 7 otherprop Objects n03257586 duplicator.n.01 appliances 37 +68 bench bench 66 39 6 bench otherfurniture Furniture bench bench 2828884 n02828884 bench.n.01 seating 34 +145 bar bar 66 38 7 bar otherstructure Objects n02788689 bar.n.03 misc 40 +157 soap dish soap dish 65 40 7 soap dish otherprop Objects n04254009 soap_dish.n.01 objects 39 +1166 laundry hamper laundry hamper 65 40 7 laundry basket otherprop Objects objects 39 +132 storage bin storage bin 63 40 7 storage bin otherprop Objects objects 39 +1167 bathroom stall door bathroom stall door 62 8 12 door door Wall door n03221720 door.n.01 door 4 +232 light switch light switch 61 38 7 light switch otherstructure Objects n04372370 switch.n.01 misc 40 +134 coffee maker coffee maker 61 40 7 otherprop Objects n03063338 coffee_maker.n.01 appliances 37 +51 tv stand tv stand 61 39 6 tv stand otherfurniture Furniture tv_stand n03290653 entertainment_center.n.01 furniture 36 +250 decoration decoration 60 40 7 otherprop Objects n03169390 decoration.n.01 misc 40 +1168 ceiling light ceiling light 59 38 7 light otherstructure Objects n03665366 light.n.02 lighting 28 +342 range hood range hood 59 38 7 range hood otherstructure Objects range_hood n04053677 range_hood.n.01 misc 40 +89 blackboard blackboard 58 38 7 blackboard otherstructure Objects n02846511 blackboard.n.01 board_panel 35 +103 clock clock 58 40 7 clock otherprop Objects clock 3046257 n03046257 clock.n.01 objects 39 +99 wardrobe closet wardrobe 54 39 6 wardrobe otherfurniture Furniture wardrobe n04550184 wardrobe.n.01 furniture 36 +95 rail rail 53 38 7 railing otherstructure Objects n04047401 railing.n.01 railing 30 +154 bulletin board bulletin board 53 38 7 board otherstructure Objects n03211616 display_panel.n.01 board_panel 35 +140 mat mat 52 20 5 floor mat floor mat Floor n03727837 mat.n.01 floor 2 +1169 trash bin trash bin 52 39 6 
garbage bin otherfurniture Furniture trash_bin 2747177 n02747177 ashcan.n.01 objects 39 +193 ledge ledge 51 38 7 otherstructure Objects n09337253 ledge.n.01 misc 40 +116 seat seat 49 39 6 furniture otherfurniture Furniture n04161981 seat.n.03 furniture 36 +202 mouse mouse 49 40 7 mouse otherprop Objects n03793489 mouse.n.04 objects 39 +73 basket basket 48 40 7 basket otherprop Objects basket 2801938 n02801938 basket.n.01 objects 39 +78 shower shower 48 38 7 otherstructure Objects n04208936 shower.n.01 shower 23 +1170 dumbbell dumbbell 48 40 7 otherprop Objects n03255030 dumbbell.n.01 objects 39 +79 paper paper 46 26 7 paper paper Objects n14974264 paper.n.01 objects 39 +80 person person 46 31 7 person person Objects person n05217688 person.n.02 misc 40 +141 windowsill windowsill 45 38 7 otherstructure Objects n04590263 windowsill.n.01 window 9 +57 closet closet 45 39 6 wardrobe otherfurniture Furniture wardrobe misc 40 +102 bucket bucket 45 40 7 bucket otherprop Objects n02909870 bucket.n.01 misc 40 +261 sign sign 44 40 7 sign otherprop Objects n04217882 signboard.n.01 objects 39 +118 speaker speaker 43 40 7 speaker otherprop Objects speaker 3691459 n03691459 loudspeaker.n.01 objects 39 +136 dishwasher dishwasher 43 38 7 dishwasher otherstructure Objects dishwasher 3207941 n03207941 dishwasher.n.01 appliances 37 +98 container container 43 40 7 container otherprop Objects n03094503 container.n.01 objects 39 +1171 stair rail stair rail 42 38 7 banister otherstructure Objects n02788148 bannister.n.02 railing 30 +170 shower curtain rod shower curtain rod 42 40 7 otherprop Objects curtain 12 +1172 tube tube 41 40 7 otherprop Objects misc 40 +1173 bathroom cabinet bathroom cabinet 39 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +79 papers paper 39 26 7 paper paper Objects n14974264 paper.n.01 objects 39 +221 storage container storage container 39 40 7 container otherprop Objects objects 39 +570 paper bag paper bag 39 37 7 bag bag Objects n04122825 sack.n.01 objects 39 +138 paper towel roll paper towel roll 39 40 7 paper towel otherprop Objects n03887697 paper_towel.n.01 towel 20 +168 ball ball 39 40 7 ball otherprop Objects objects 39 +276 closet doors closet door 38 8 12 door door Wall door n03221720 door.n.01 door 4 +106 laundry basket laundry basket 37 40 7 laundry basket otherprop Objects basket 2801938 n03050864 clothes_hamper.n.01 objects 39 +214 cart cart 37 40 7 cart otherprop Objects n03484083 handcart.n.01 shelving 31 +276 closet door closet door 35 8 12 door door Wall door n03221720 door.n.01 door 4 +323 dish rack dish rack 35 40 7 dish rack otherprop Objects n03207630 dish_rack.n.01 objects 39 +58 stairs stairs 35 38 7 stairs otherstructure Objects n04298308 stairway.n.01 stairs 16 +86 blinds blinds 35 13 13 blinds blinds Window n02851099 blind.n.03 blinds 32 +2 stack of chairs chair 35 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +399 purse purse 34 40 7 purse otherprop Objects n02774152 bag.n.04 objects 39 +121 bicycle bicycle 33 40 7 bicycle otherprop Objects bicycle 2834778 n02834778 bicycle.n.01 objects 39 +185 tray tray 32 40 7 tray otherprop Objects n04476259 tray.n.01 objects 39 +300 plunger plunger 30 40 7 otherprop Objects n03970156 plunger.n.03 objects 39 +180 paper cutter paper cutter 30 40 7 paper cutter otherprop Objects n03886940 paper_cutter.n.01 objects 39 +163 toilet paper dispenser toilet paper dispenser 29 40 7 otherprop Objects objects 39 +26 boxes box 29 29 7 box box Objects n02883344 box.n.01 
objects 39 +66 bin bin 28 40 7 bin otherprop Objects n02839910 bin.n.01 objects 39 +208 toilet seat cover dispenser toilet seat cover dispenser 28 40 7 otherprop Objects objects 39 +112 guitar guitar 28 40 7 guitar otherprop Objects guitar guitar 3467517 n03467517 guitar.n.01 objects 39 +540 mailboxes mailbox 28 29 7 box box Objects mailbox 3710193 n03710193 mailbox.n.01 misc 40 +395 handicap bar handicap bar 27 38 7 bar otherstructure Objects misc 40 +166 fire extinguisher fire extinguisher 27 40 7 fire extinguisher otherprop Objects n03345837 fire_extinguisher.n.01 misc 40 +122 ladder ladder 27 39 6 ladder otherfurniture Furniture stairs n03632277 ladder.n.01 stairs 16 +120 column column 26 38 7 column otherstructure Objects n03074380 column.n.06 column 24 +107 pipe pipe 25 40 7 pipe otherprop Objects n03944672 pipe.n.02 misc 40 +283 vacuum cleaner vacuum cleaner 25 40 7 otherprop Objects n04517823 vacuum.n.04 objects 39 +88 plate plate 24 40 7 plate otherprop Objects n03959485 plate.n.04 objects 39 +90 piano piano 24 39 6 piano otherfurniture Furniture piano piano 3928116 n03928116 piano.n.01 furniture 36 +177 water cooler water cooler 24 39 6 water cooler otherfurniture Furniture n04559166 water_cooler.n.01 misc 40 +1174 cd case cd case 24 40 7 otherprop Objects objects 39 +562 bowl bowl 24 40 7 bowl otherprop Objects bowl bowl 2880940 n02880940 bowl.n.03 objects 39 +1175 closet rod closet rod 24 40 7 otherprop Objects n04100174 rod.n.01 misc 40 +1156 bathroom counter bathroom counter 24 12 6 counter counter Furniture table table table 4379243 n03116530 counter.n.01 counter 26 +84 oven oven 23 38 7 oven otherstructure Objects n03862676 oven.n.01 appliances 37 +104 stand stand 23 39 6 stand otherfurniture Furniture table table table 4379243 n04301000 stand.n.04 table 5 +229 scale scale 23 40 7 scale otherprop Objects n04141975 scale.n.07 objects 39 +70 washing machine washing machine 23 39 6 washing machine otherfurniture Furniture washing_machine 4554684 n04554684 washer.n.03 appliances 37 +325 broom broom 22 40 7 broom otherprop Objects n02906734 broom.n.01 objects 39 +169 hat hat 22 40 7 hat otherprop Objects n03497657 hat.n.01 clothes 38 +128 shower wall shower wall 22 1 12 wall wall Wall n04208936 shower.n.01 wall 1 +331 guitar case guitar case 21 40 7 guitar case otherprop Objects objects 39 +87 rack rack 21 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +488 water pitcher water pitcher 21 40 7 pitcher otherprop Objects n03950228 pitcher.n.02 objects 39 +776 laundry detergent laundry detergent 21 40 7 otherprop Objects objects 39 +370 hair dryer hair dryer 21 40 7 hair dryer otherprop Objects n03483316 hand_blower.n.01 objects 39 +191 pillar pillar 21 38 7 column otherstructure Objects n03073977 column.n.07 column 24 +748 divider divider 20 40 7 otherprop Objects wall 1 +242 power outlet power outlet 19 40 7 otherprop Objects misc 40 +45 dining table dining table 19 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +417 shower floor shower floor 19 2 5 floor floor Floor n04208936 shower.n.01 floor 2 +70 washing machines washing machine 19 39 6 washing machine otherfurniture Furniture washing_machine 4554684 n04554684 washer.n.03 appliances 37 +188 shower door shower door 19 8 12 door door Wall door n04208936 shower.n.01 door 4 +1176 coffee kettle coffee kettle 18 40 7 pot otherprop Objects n03612814 kettle.n.01 objects 39 +1177 wardrobe cabinet wardrobe 18 39 6 wardrobe otherfurniture Furniture wardrobe n04550184 wardrobe.n.01 
furniture 36 +1178 structure structure 18 38 7 otherstructure Objects misc 40 +18 bookshelves bookshelf 17 10 6 bookshelf bookshelf Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +110 clothes dryer clothes dryer 17 39 6 otherfurniture Furniture n03251766 dryer.n.01 appliances 37 +148 toaster toaster 17 40 7 toaster otherprop Objects n04442312 toaster.n.02 appliances 37 +63 shoe shoe 17 40 7 shoe otherprop Objects n04199027 shoe.n.01 clothes 38 +155 ironing board ironing board 16 39 6 ironing board otherfurniture Furniture n03586090 ironing_board.n.01 objects 39 +572 alarm clock alarm clock 16 40 7 alarm clock otherprop Objects clock 3046257 n02694662 alarm_clock.n.01 objects 39 +1179 shower head shower head 15 38 7 otherstructure Objects shower 23 +28 lamp base lamp 15 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +392 water bottle water bottle 15 40 7 bottle otherprop Objects bottle bottle 2876657 n04557648 water_bottle.n.01 objects 39 +1180 keyboard piano keyboard piano 15 39 6 piano otherfurniture Furniture piano piano 3928116 n03928116 piano.n.01 furniture 36 +609 projector screen projector screen 15 38 7 projector screen otherstructure Objects misc 40 +1181 case of water bottles case of water bottles 15 40 7 otherprop Objects objects 39 +195 toaster oven toaster oven 14 40 7 toaster oven otherprop Objects n04442441 toaster_oven.n.01 appliances 37 +581 music stand music stand 14 39 6 music stand otherfurniture Furniture n03801760 music_stand.n.01 furniture 36 +58 staircase stairs 14 38 7 stairs otherstructure Objects n04298308 stairway.n.01 stairs 16 +1182 coat rack coat rack 14 40 7 otherprop Objects n03059103 coatrack.n.01 shelving 3 +1183 storage organizer storage organizer 14 40 7 otherprop Objects shelving 3 +139 machine machine 14 40 7 machine otherprop Objects n03699975 machine.n.01 appliances 37 +1184 folded chair folded chair 14 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +1185 fire alarm fire alarm 14 40 7 otherprop Objects n03343737 fire_alarm.n.02 misc 40 +156 fireplace fireplace 13 38 7 fireplace otherstructure Objects n03346455 fireplace.n.01 fireplace 27 +408 vent vent 13 40 7 otherprop Objects n04526241 vent.n.01 misc 40 +213 furniture furniture 13 39 6 furniture otherfurniture Furniture n03405725 furniture.n.01 furniture 36 +1186 power strip power strip 13 40 7 otherprop Objects objects 39 +1187 calendar calendar 13 40 7 otherprop Objects objects 39 +1188 poster poster 13 11 8 picture picture Picture n03931044 picture.n.01 picture 6 +115 toilet paper holder toilet paper holder 13 40 7 toilet paper holder otherprop Objects objects 39 +1189 potted plant potted plant 12 40 7 plant otherprop Objects plant n00017222 plant.n.02 plant 14 +304 stuffed animal stuffed animal 12 40 7 stuffed animal otherprop Objects n04399382 teddy.n.01 objects 39 +1190 luggage luggage 12 40 7 luggage otherprop Objects n02774630 baggage.n.01 objects 39 +21 curtains curtain 12 16 13 curtain curtain Window curtain n03151077 curtain.n.01 curtain 12 +312 headphones headphones 12 40 7 otherprop Objects n03261776 earphone.n.01 objects 39 +233 crate crate 12 39 6 crate otherfurniture Furniture n03127925 crate.n.01 objects 39 +286 candle candle 12 40 7 candle otherprop Objects lamp n02948072 candle.n.01 objects 39 +264 projector projector 12 40 7 projector otherprop Objects n04009552 projector.n.02 objects 39 +110 clothes dryers clothes dryer 12 39 6 otherfurniture Furniture n03251766 dryer.n.01 appliances 37 +1191 
mattress mattress 12 4 1 bed bed Bed bed bed bed 2818832 n02818832 bed.n.01 bed 11 +356 dustpan dustpan 12 40 7 otherprop Objects n03259009 dustpan.n.02 objects 39 +25 drawer drawer 11 39 6 drawer otherfurniture Furniture n03233905 drawer.n.01 furniture 36 +750 rod rod 11 40 7 otherprop Objects pistol 3948459 n03427202 gat.n.01 misc 40 +269 globe globe 11 40 7 globe otherprop Objects objects 39 +307 footrest footrest 11 39 6 foot rest otherfurniture Furniture stool n03380724 footstool.n.01 stool 19 +410 piano bench piano bench 11 39 6 piano bench otherfurniture Furniture bench bench 2828884 n02828884 bench.n.01 seating 34 +730 breakfast bar breakfast bar 11 38 7 bar otherstructure Objects counter 26 +216 step stool step stool 11 40 7 step stool otherprop Objects stool n04315713 step_stool.n.01 stool 19 +1192 hand rail hand rail 11 38 7 railing otherstructure Objects railing 30 +119 vending machine vending machine 11 40 7 machine otherprop Objects n04525305 vending_machine.n.01 appliances 37 +682 ceiling fan ceiling fan 11 40 7 fan otherprop Objects n03320046 fan.n.01 misc 40 +434 swiffer swiffer 11 40 7 otherprop Objects objects 39 +126 foosball table foosball table 11 39 6 foosball table otherfurniture Furniture table table table 4379243 n04379243 table.n.02 table 5 +919 jar jar 11 40 7 jar otherprop Objects jar 3593526 n03593526 jar.n.01 objects 39 +85 footstool footstool 11 39 6 ottoman otherfurniture Furniture stool n03380724 footstool.n.01 stool 19 +1193 folded table folded table 10 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +108 round table round table 10 7 10 table table Table table table table 4379243 n04114554 round_table.n.02 table 5 +135 hamper hamper 10 40 7 basket otherprop Objects basket 2801938 n03482405 hamper.n.02 objects 39 +1194 poster tube poster tube 10 40 7 otherprop Objects objects 39 +432 case case 10 40 7 case otherprop Objects objects 39 +53 carpet carpet 10 40 7 rug otherprop Objects n04118021 rug.n.01 floor 2 +1195 thermostat thermostat 10 40 7 otherprop Objects n04422875 thermostat.n.01 misc 40 +111 coat coat 10 40 7 jacket otherprop Objects n03057021 coat.n.01 clothes 38 +305 water fountain water fountain 10 38 7 water fountain otherstructure Objects n03241335 drinking_fountain.n.01 misc 40 +1125 smoke detector smoke detector 10 40 7 otherprop Objects misc 40 +13 pillows pillow 9 18 7 pillow pillow Objects pillow 3938244 n03938244 pillow.n.01 cushion 8 +1196 flip flops flip flops 9 40 7 shoe otherprop Objects n04199027 shoe.n.01 clothes 38 +1197 cloth cloth 9 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +1198 banner banner 9 40 7 otherprop Objects n02788021 banner.n.01 misc 40 +1199 clothes hanger clothes hanger 9 40 7 otherprop Objects n03057920 coat_hanger.n.01 objects 39 +1200 whiteboard eraser whiteboard eraser 9 40 7 otherprop Objects objects 39 +378 iron iron 9 40 7 otherprop Objects n03584829 iron.n.04 objects 39 +591 instrument case instrument case 9 40 7 case otherprop Objects objects 39 +49 toilet paper rolls toilet paper 9 40 7 toilet paper otherprop Objects n15075141 toilet_tissue.n.01 objects 39 +92 soap soap 9 40 7 soap otherprop Objects n04253437 soap.n.01 objects 39 +1098 block block 9 40 7 otherprop Objects misc 40 +291 wall hanging wall hanging 8 40 7 otherprop Objects n03491178 hanging.n.01 picture 6 +1063 kitchen island kitchen island 8 38 7 kitchen island otherstructure Objects n03620600 kitchen_island.n.01 counter 26 +107 pipes pipe 8 38 7 otherstructure Objects misc 40 +1135 toothbrush 
toothbrush 8 40 7 toothbrush otherprop Objects n04453156 toothbrush.n.01 objects 39 +189 shirt shirt 8 40 7 otherprop Objects n04197391 shirt.n.01 clothes 38 +245 cutting board cutting board 8 40 7 cutting board otherprop Objects n03025513 chopping_board.n.01 objects 39 +194 vase vase 8 40 7 vase otherprop Objects vase jar 3593526 n04522168 vase.n.01 objects 39 +1201 shower control valve shower control valve 8 38 7 otherstructure Objects n04208936 shower.n.01 shower 23 +386 exercise machine exercise machine 8 40 7 machine otherprop Objects gym_equipment 33 +1202 compost bin compost bin 8 39 6 garbage bin otherfurniture Furniture trash_bin 2747177 n02747177 ashcan.n.01 objects 39 +857 shorts shorts 8 40 7 shorts otherprop Objects clothes 38 +452 tire tire 8 40 7 otherprop Objects n04440749 tire.n.01 objects 39 +1203 teddy bear teddy bear 7 40 7 stuffed animal otherprop Objects n04399382 teddy.n.01 objects 39 +346 bathrobe bathrobe 7 40 7 otherprop Objects n02807616 bathrobe.n.01 clothes 38 +152 handrail handrail 7 38 7 railing otherstructure Objects n02788148 bannister.n.02 railing 30 +83 faucet faucet 7 40 7 faucet otherprop Objects faucet 3325088 n03325088 faucet.n.01 misc 40 +1204 pantry wall pantry wall 7 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +726 thermos thermos 7 40 7 flask otherprop Objects bottle bottle 2876657 n04422727 thermos.n.01 objects 39 +61 rug rug 7 40 7 rug otherprop Objects n04118021 rug.n.01 floor 2 +39 couch cushions cushion 7 18 7 pillow pillow Objects n03151500 cushion.n.03 cushion 8 +1117 tripod tripod 7 39 6 stand otherfurniture Furniture n04485082 tripod.n.01 objects 39 +540 mailbox mailbox 7 29 7 box box Objects mailbox 3710193 n03710193 mailbox.n.01 misc 40 +1205 tupperware tupperware 7 40 7 otherprop Objects objects 39 +415 shoe rack shoe rack 7 40 7 shoe rack otherprop Objects shelving 31 +31 towels towel 6 27 7 towel towel Objects n04459362 towel.n.01 towel 20 +1206 beer bottles beer bottle 6 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +153 treadmill treadmill 6 39 6 treadmill otherfurniture Furniture n04477387 treadmill.n.01 gym_equipment 33 +1207 salt salt 6 40 7 otherprop Objects objects 39 +129 chest chest 6 39 6 chest otherfurniture Furniture dresser dresser chest_of_drawers 13 +220 dispenser dispenser 6 40 7 otherprop Objects n03210683 dispenser.n.01 objects 39 +1208 mirror doors mirror door 6 8 12 door door Wall door n03221720 door.n.01 door 4 +231 remote remote 6 40 7 otherprop Objects remote_control 4074963 n04074963 remote_control.n.01 objects 39 +1209 folded ladder folded ladder 6 39 6 ladder otherfurniture Furniture stairs n03632277 ladder.n.01 misc 40 +39 cushion cushion 6 18 7 pillow pillow Objects n03151500 cushion.n.03 cushion 8 +1210 carton carton 6 40 7 otherprop Objects objects 39 +117 step step 6 38 7 otherstructure Objects n04314914 step.n.04 misc 40 +822 drying rack drying rack 6 39 6 drying rack otherfurniture Furniture shelving 31 +238 slippers slipper 6 40 7 shoe otherprop Objects n04241394 slipper.n.01 clothes 38 +143 pool table pool table 6 39 6 pool table otherfurniture Furniture table table table 4379243 n03982430 pool_table.n.01 table 5 +1211 soda stream soda stream 6 40 7 otherprop Objects objects 39 +228 toilet brush toilet brush 6 40 7 toilet brush otherprop Objects objects 39 +494 loft bed loft bed 6 4 1 bed bed Bed bed bed bed 2818832 n02818832 bed.n.01 bed 11 +226 cooking pot cooking pot 6 40 7 pot otherprop Objects objects 39 +91 heater heater 6 39 6 heater otherfurniture 
Furniture n03508101 heater.n.01 misc 40 +1072 messenger bag messenger bag 6 37 7 bag bag Objects objects 39 +435 stapler stapler 6 40 7 stapler otherprop Objects n04303497 stapler.n.01 objects 39 +1165 closet walls closet wall 5 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +345 scanner scanner 5 40 7 otherprop Objects appliances 37 +893 elliptical machine elliptical machine 5 40 7 machine otherprop Objects gym_equipment 33 +621 kettle kettle 5 40 7 pot otherprop Objects n03612814 kettle.n.01 objects 39 +1212 metronome metronome 5 40 7 otherprop Objects n03757604 metronome.n.01 objects 39 +297 dumbell dumbell 5 40 7 otherprop Objects objects 39 +1213 music book music book 5 23 2 book books Books n02870526 book.n.11 objects 39 +1214 rice cooker rice cooker 5 40 7 otherprop Objects objects 39 +1215 dart board dart board 5 38 7 board otherstructure Objects n03162940 dartboard.n.01 objects 39 +529 sewing machine sewing machine 5 40 7 sewing machine otherprop Objects n04179913 sewing_machine.n.01 objects 39 +1216 grab bar grab bar 5 38 7 railing otherstructure Objects railing 30 +1217 flowerpot flowerpot 5 40 7 vase otherprop Objects vase jar 3593526 n04522168 vase.n.01 objects 39 +1218 painting painting 5 11 8 picture picture Picture n03931044 picture.n.01 picture 6 +1219 railing railing 5 38 7 railing otherstructure Objects n04047401 railing.n.01 railing 30 +1220 stair stair 5 38 7 stairs otherstructure Objects stairs n04314914 step.n.04 stairs 16 +525 toolbox toolbox 5 39 6 chest otherfurniture Furniture n04452615 toolbox.n.01 objects 39 +204 nerf gun nerf gun 5 40 7 otherprop Objects objects 39 +693 binders binder 5 40 7 binder otherprop Objects objects 39 +179 desk lamp desk lamp 5 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +1221 quadcopter quadcopter 5 40 7 otherprop Objects objects 39 +1222 pitcher pitcher 5 40 7 pitcher otherprop Objects n03950228 pitcher.n.02 objects 39 +1223 hanging hanging 5 40 7 otherprop Objects misc 40 +1224 mail mail 5 40 7 otherprop Objects misc 40 +1225 closet ceiling closet ceiling 5 22 3 ceiling ceiling Ceiling n02990373 ceiling.n.01 ceiling 17 +1226 hoverboard hoverboard 5 40 7 otherprop Objects objects 39 +1227 beanbag chair beanbag chair 5 39 6 bean bag otherfurniture Furniture n02816656 beanbag.n.01 chair 3 +571 water heater water heater 5 40 7 water heater otherprop Objects n04560113 water_heater.n.01 misc 40 +1228 spray bottle spray bottle 5 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +556 rope rope 5 40 7 rope otherprop Objects n04108268 rope.n.01 objects 39 +280 plastic container plastic container 5 40 7 container otherprop Objects objects 39 +1229 soap bottle soap bottle 5 40 7 soap otherprop Objects objects 39 +1230 ikea bag ikea bag 4 37 7 bag bag Objects 2773838 n02773838 bag.n.06 objects 39 +1231 sleeping bag sleeping bag 4 40 7 otherprop Objects n04235860 sleeping_bag.n.01 objects 39 +1232 duffel bag duffel bag 4 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +746 frying pan frying pan 4 40 7 frying pan otherprop Objects n03400231 frying_pan.n.01 objects 39 +1233 oven mitt oven mitt 4 40 7 otherprop Objects objects 39 +1234 pot pot 4 40 7 pot otherprop Objects n04235860 sleeping_bag.n.01 objects 39 +144 hand dryer hand dryer 4 40 7 otherprop Objects objects 39 +282 dollhouse dollhouse 4 39 6 doll house otherfurniture Furniture n03219483 dollhouse.n.01 objects 39 +167 shampoo bottle shampoo bottle 4 40 7 bottle otherprop Objects bottle bottle 2876657 
n02876657 bottle.n.01 objects 39 +1235 hair brush hair brush 4 40 7 otherprop Objects n02908217 brush.n.02 objects 39 +1236 tennis racket tennis racket 4 40 7 otherprop Objects n04409806 tennis_racket.n.01 objects 39 +1237 display case display case 4 40 7 case otherprop Objects objects 39 +234 ping pong table ping pong table 4 39 6 ping pong table otherfurniture Furniture table table table 4379243 n04379243 table.n.02 table 5 +563 boiler boiler 4 40 7 otherprop Objects misc 40 +1238 bag of coffee beans bag of coffee beans 4 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +1239 bananas banana 4 40 7 otherprop Objects n00021265 food.n.01 objects 39 +1240 carseat carseat 4 40 7 otherprop Objects misc 40 +366 helmet helmet 4 40 7 otherprop Objects helmet 3513137 n03513137 helmet.n.02 clothes 38 +816 umbrella umbrella 4 40 7 umbrella otherprop Objects n04507155 umbrella.n.01 objects 39 +1241 coffee box coffee box 4 40 7 otherprop Objects objects 39 +719 envelope envelope 4 40 7 envelope otherprop Objects n03291819 envelope.n.01 objects 39 +284 wet floor sign wet floor sign 4 40 7 sign otherprop Objects misc 40 +1242 clothing rack clothing rack 4 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +247 controller controller 4 40 7 otherprop Objects n03096960 control.n.09 objects 39 +1243 bath walls bathroom wall 4 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +1244 podium podium 4 39 6 otherfurniture Furniture n03159640 dais.n.01 furniture 36 +1245 storage box storage box 4 29 7 box box Objects n02883344 box.n.01 objects 39 +1246 dolly dolly 4 40 7 otherprop Objects misc 40 +1247 shampoo shampoo 3 40 7 otherprop Objects n04183516 shampoo.n.01 objects 39 +592 paper tray paper tray 3 40 7 paper tray otherprop Objects objects 39 +385 cabinet door cabinet door 3 8 12 door door Wall door door 4 +1248 changing station changing station 3 40 7 otherprop Objects misc 40 +1249 poster printer poster printer 3 40 7 printer otherprop Objects printer 4004475 n04004475 printer.n.03 appliances 37 +133 screen screen 3 40 7 otherprop Objects n03151077 curtain.n.01 curtain 12 +301 soap bar soap bar 3 38 7 bar otherstructure Objects objects 39 +1250 crutches crutches 3 40 7 otherprop Objects n03141823 crutch.n.01 objects 39 +379 studio light studio light 3 38 7 light otherstructure Objects lighting 28 +130 stack of cups cup 3 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 +1251 toilet flush button toilet flush button 3 40 7 otherprop Objects objects 39 +450 trunk trunk 3 40 7 otherprop Objects misc 40 +1252 grocery bag grocery bag 3 37 7 bag bag Objects suitcase 2773838 n03461288 grocery_bag.n.01 objects 39 +316 plastic bin plastic bin 3 40 7 bin otherprop Objects objects 39 +1253 pizza box pizza box 3 29 7 box box Objects objects 39 +385 cabinet doors cabinet door 3 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 door 4 +1254 legs legs 3 31 7 person person Objects person n05217688 person.n.02 misc 40 +461 car car 3 40 7 car otherprop Objects car car 2958343 n02958343 car.n.01 misc 40 +1255 shaving cream shaving cream 3 40 7 otherprop Objects n04186051 shaving_cream.n.01 objects 39 +1256 luggage stand luggage stand 3 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +599 shredder shredder 3 40 7 otherprop Objects n04210120 shredder.n.01 objects 39 +281 statue statue 3 40 7 sculpture otherprop Objects n04306847 statue.n.01 misc 40 +1257 urinal urinal 3 33 7 toilet toilet Objects toilet toilet n04515991 
urinal.n.01 toilet 18 +1258 hose hose 3 40 7 otherprop Objects n03539875 hose.n.03 misc 40 +1259 bike pump bike pump 3 40 7 otherprop Objects objects 39 +319 coatrack coatrack 3 40 7 otherprop Objects n03059103 coatrack.n.01 shelving 31 +1260 bear bear 3 40 7 otherprop Objects objects 39 +28 wall lamp lamp 3 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +1261 humidifier humidifier 3 40 7 otherprop Objects objects 39 +546 toothpaste toothpaste 3 40 7 toothpaste otherprop Objects objects 39 +1262 mouthwash bottle mouthwash bottle 3 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +1263 poster cutter poster cutter 3 40 7 otherprop Objects objects 39 +1264 golf bag golf bag 3 37 7 bag bag Objects suitcase 2773838 n03445617 golf_bag.n.01 objects 39 +1265 food container food container 3 40 7 container otherprop Objects n03094503 container.n.01 objects 39 +1266 camera camera 3 40 7 otherprop Objects objects 39 +28 table lamp lamp 3 35 7 lamp lamp Objects lamp lamp 3636649 n04380533 table_lamp.n.01 lighting 28 +1267 yoga mat yoga mat 3 20 5 floor mat floor mat Floor n03727837 mat.n.01 floor 2 +1268 card card 3 40 7 otherprop Objects objects 39 +1269 mug mug 3 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 +188 shower doors shower door 3 38 7 otherstructure Objects n04208936 shower.n.01 door 4 +689 cardboard cardboard 3 40 7 otherprop Objects objects 39 +1270 rack stand rack stand 3 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +1271 boxes of paper boxes of paper 3 29 7 box box Objects n02883344 box.n.01 objects 39 +1272 flag flag 3 40 7 otherprop Objects misc 40 +354 futon futon 3 39 6 mattress otherfurniture Furniture n03408444 futon.n.01 sofa 10 +339 magazine magazine 3 40 7 magazine otherprop Objects n06595351 magazine.n.01 objects 39 +1009 exit sign exit sign 3 40 7 exit sign otherprop Objects misc 40 +1273 rolled poster rolled poster 3 40 7 otherprop Objects objects 39 +1274 wheel wheel 3 40 7 otherprop Objects objects 39 +15 pictures picture 3 11 8 picture picture Picture n03931044 picture.n.01 picture 6 +1275 blackboard eraser blackboard eraser 3 40 7 eraser otherprop Objects n03294833 eraser.n.01 objects 39 +361 organizer organizer 3 40 7 otherprop Objects n03918737 personal_digital_assistant.n.01 objects 39 +1276 doll doll 3 40 7 toy otherprop Objects n03219135 doll.n.01 objects 39 +326 book rack book rack 3 39 6 bookrack otherfurniture Furniture objects 39 +1277 laundry bag laundry bag 3 40 7 laundry basket otherprop Objects basket 2801938 n03050864 clothes_hamper.n.01 objects 39 +1278 sponge sponge 3 40 7 otherprop Objects n01906749 sponge.n.04 objects 39 +116 seating seat 3 39 6 furniture otherfurniture Furniture n04161981 seat.n.03 furniture 36 +1184 folded chairs folded chair 2 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +1279 lotion bottle lotion bottle 2 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +212 can can 2 40 7 can otherprop Objects can 2946921 n02946921 can.n.01 objects 39 +1280 lunch box lunch box 2 40 7 otherprop Objects objects 39 +1281 food display food display 2 40 7 otherprop Objects misc 40 +794 storage shelf storage shelf 2 40 7 otherprop Objects shelving 31 +1282 sliding wood door sliding wood door 2 40 7 otherprop Objects door 4 +955 pants pants 2 40 7 otherprop Objects n04489008 trouser.n.01 clothes 38 +387 wood wood 2 40 7 otherprop Objects misc 40 +69 boards board 2 38 7 
board otherstructure Objects board_panel 35 +65 bottles bottle 2 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +523 washcloth washcloth 2 40 7 otherprop Objects n04554523 washcloth.n.01 towel 20 +389 workbench workbench 2 39 6 bench otherfurniture Furniture bench table 4379243 n04600486 workbench.n.01 table 5 +29 open kitchen cabinet kitchen cabinet 2 3 6 cabinet cabinet Furniture n02933112 cabinet.n.01 cabinet 7 +1283 organizer shelf organizer shelf 2 15 6 shelves shelves Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +146 frame frame 2 38 7 otherstructure Objects misc 40 +130 cups cup 2 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 +372 exercise ball exercise ball 2 40 7 ball otherprop Objects n04285146 sports_equipment.n.01 gym_equipment 33 +289 easel easel 2 39 6 stand otherfurniture Furniture n03262809 easel.n.01 furniture 36 +440 garbage bag garbage bag 2 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +321 roomba roomba 2 40 7 otherprop Objects objects 39 +976 garage door garage door 2 38 7 garage door otherstructure Objects door door 4 +1256 luggage rack luggage stand 2 39 6 stand otherfurniture Furniture n04038440 shelving 31 +1284 bike lock bike lock 2 40 7 otherprop Objects objects 39 +1285 briefcase briefcase 2 40 7 otherprop Objects n02900705 briefcase.n.01 objects 39 +357 hand towel hand towel 2 27 7 towel towel Objects n03490006 hand_towel.n.01 towel 20 +1286 bath products bath product 2 40 7 otherprop Objects objects 39 +1287 star star 2 40 7 otherprop Objects n09444783 star.n.03 misc 40 +365 map map 2 40 7 map otherprop Objects n03720163 map.n.01 misc 40 +1288 coffee bean bag coffee bean bag 2 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +81 headboard headboard 2 39 6 headboard otherfurniture Furniture n03502200 headboard.n.01 bed 11 +1289 ipad ipad 2 40 7 otherprop Objects objects 39 +1290 display rack display rack 2 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +948 traffic cone traffic cone 2 40 7 cone otherprop Objects cone objects 39 +174 toiletry toiletry 2 40 7 otherprop Objects n04447443 toiletry.n.01 objects 39 +1028 canopy canopy 2 40 7 otherprop Objects misc 40 +1291 massage chair massage chair 2 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +1292 paper organizer paper organizer 2 40 7 otherprop Objects objects 39 +1005 barricade barricade 2 40 7 otherprop Objects misc 40 +235 platform platform 2 38 7 otherstructure Objects misc 40 +1293 cap cap 2 40 7 hat otherprop Objects n03497657 hat.n.01 clothes 38 +1294 dumbbell plates dumbbell plates 2 40 7 otherprop Objects objects 39 +1295 elevator elevator 2 38 7 otherstructure Objects misc 40 +1296 cooking pan cooking pan 2 40 7 pan otherprop Objects n03880531 pan.n.01 objects 39 +1297 trash bag trash bag 2 37 7 bag bag Objects objects 39 +1298 santa santa 2 40 7 otherprop Objects misc 40 +1299 jewelry box jewelry box 2 29 7 box box Objects n02883344 box.n.01 objects 39 +1300 boat boat 2 40 7 otherprop Objects misc 40 +1301 sock sock 2 21 7 clothes clothes Objects n04254777 sock.n.01 clothes 38 +1051 kinect kinect 2 40 7 kinect otherprop Objects objects 39 +566 crib crib 2 39 6 crib otherfurniture Furniture furniture 36 +1302 plastic storage bin plastic storage bin 2 40 7 container otherprop Objects n03094503 container.n.01 objects 39 +1062 cooler cooler 2 24 6 refrigerator refrigerator Furniture n03102654 cooler.n.01 
appliances 37 +1303 kitchen apron kitchen apron 2 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +1304 dishwashing soap bottle dishwashing soap bottle 2 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +1305 xbox controller xbox controller 2 40 7 otherprop Objects objects 39 +1306 banana holder banana holder 2 40 7 otherprop Objects objects 39 +298 ping pong paddle ping pong paddle 2 40 7 otherprop Objects table 5 +1307 airplane airplane 2 40 7 otherprop Objects misc 40 +1308 conditioner bottle conditioner bottle 2 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +1309 tea kettle tea kettle 2 40 7 tea kettle otherprop Objects n04397768 teakettle.n.01 objects 39 +43 bedframe bedframe 2 39 6 otherfurniture Furniture n02822579 bedstead.n.01 bed 11 +1310 wood beam wood beam 2 38 7 otherstructure Objects beam 29 +593 toilet paper package toilet paper package 2 40 7 otherprop Objects objects 39 +1311 wall mounted coat rack wall mounted coat rack 2 40 7 otherprop Objects n03059103 coatrack.n.01 shelving 31 +1312 film light film light 2 40 7 otherprop Objects lighting 28 +749 ceiling lamp ceiling lamp 1 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +623 chain chain 1 40 7 otherprop Objects chair 3 +1313 sofa sofa 1 6 9 sofa sofa Sofa sofa sofa sofa 4256520 n04256520 sofa.n.01 sofa 10 +99 closet wardrobe wardrobe 1 39 6 wardrobe otherfurniture Furniture wardrobe n04550184 wardrobe.n.01 furniture 36 +265 sweater sweater 1 40 7 otherprop Objects n04370048 sweater.n.01 clothes 38 +1314 kitchen mixer kitchen mixer 1 40 7 otherprop Objects appliances 37 +99 wardrobe wardrobe 1 39 6 wardrobe otherfurniture Furniture wardrobe n04550184 wardrobe.n.01 furniture 36 +1315 water softener water softener 1 40 7 otherprop Objects misc 40 +448 banister banister 1 38 7 banister otherstructure Objects n02788148 bannister.n.02 railing 30 +257 trolley trolley 1 40 7 trolley otherprop Objects n04335435 streetcar.n.01 misc 40 +1316 pantry shelf pantry shelf 1 15 6 shelves shelves Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +786 sofa bed sofa bed 1 4 1 bed bed Bed bed bed bed 2818832 n02818832 bed.n.01 bed 11 +801 loofa loofa 1 40 7 otherprop Objects objects 39 +972 shower faucet handle shower faucet handle 1 40 7 handle otherprop Objects shower 23 +1317 toy piano toy piano 1 40 7 toy otherprop Objects n03964744 plaything.n.01 objects 39 +1318 fish fish 1 40 7 otherprop Objects n02512053 fish.n.01 objects 39 +75 file cabinets file cabinet 1 3 6 cabinet cabinet Furniture cabinet 2933112 n03337140 file.n.03 cabinet 7 +657 cat litter box cat litter box 1 29 7 box box Objects objects 39 +561 electric panel electric panel 1 40 7 otherprop Objects misc 40 +93 suitcases suitcase 1 40 7 luggage otherprop Objects n02774630 baggage.n.01 objects 39 +513 curtain rod curtain rod 1 38 7 curtain rod otherstructure Objects curtain 12 +411 bunk bed bunk bed 1 39 6 bunk bed otherfurniture Furniture bed bed bed 2818832 n02920259 bunk_bed.n.01 bed 11 +1122 chandelier chandelier 1 38 7 chandelier otherstructure Objects n03005285 chandelier.n.01 lighting 28 +922 tape tape 1 40 7 tape otherprop Objects objects 39 +88 plates plate 1 40 7 otherprop Objects n03959485 plate.n.04 objects 39 +518 alarm alarm 1 40 7 alarm otherprop Objects clock 3046257 n02694662 alarm_clock.n.01 objects 39 +814 fire hose fire hose 1 40 7 otherprop Objects n03346004 fire_hose.n.01 misc 40 +1319 toy dinosaur toy dinosaur 1 
40 7 toy otherprop Objects n03964744 plaything.n.01 objects 39 +1320 cone cone 1 40 7 otherprop Objects objects 39 +649 glass doors glass door 1 8 12 door door Wall door n03221720 door.n.01 door 4 +607 hatrack hatrack 1 40 7 otherprop Objects n03059103 coatrack.n.01 shelving 31 +819 subwoofer subwoofer 1 40 7 speaker otherprop Objects speaker 3691459 n04349401 subwoofer.n.01 objects 39 +1321 fire sprinkler fire sprinkler 1 40 7 otherprop Objects misc 40 +1322 trash cabinet trash cabinet 1 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +1204 pantry walls pantry wall 1 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +227 photo photo 1 40 7 photo otherprop Objects n03925226 photograph.n.01 picture 6 +817 barrier barrier 1 40 7 otherprop Objects n02796623 barrier.n.01 misc 40 +130 stacks of cups cup 1 40 7 otherprop Objects n03147509 cup.n.01 objects 39 +712 beachball beachball 1 40 7 ball otherprop Objects n02814224 beach_ball.n.01 objects 39 +1323 folded boxes folded boxes 1 40 7 otherprop Objects objects 39 +1324 contact lens solution bottle contact lens solution bottle 1 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +673 covered box covered box 1 29 7 box box Objects objects 39 +459 folder folder 1 40 7 folder otherprop Objects n03376279 folder.n.02 objects 39 +643 mail trays mail tray 1 40 7 mail tray otherprop Objects objects 39 +238 slipper slipper 1 40 7 otherprop Objects n04241394 slipper.n.01 clothes 38 +765 magazine rack magazine rack 1 39 6 stand otherfurniture Furniture n03704549 magazine_rack.n.01 shelving 31 +1008 sticker sticker 1 40 7 sticker otherprop Objects n07272545 gummed_label.n.01 objects 39 +225 lotion lotion 1 40 7 otherprop Objects n03690938 lotion.n.01 objects 39 +1083 buddha buddha 1 40 7 otherprop Objects objects 39 +813 file organizer file organizer 1 40 7 otherprop Objects objects 39 +138 paper towel rolls paper towel roll 1 40 7 paper towel otherprop Objects n03887697 paper_towel.n.01 towel 20 +1145 night lamp night lamp 1 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +796 fuse box fuse box 1 40 7 otherprop Objects misc 40 +1325 knife block knife block 1 40 7 otherprop Objects objects 39 +363 furnace furnace 1 39 6 furnace otherfurniture Furniture n03404449 furnace.n.01 +1174 cd cases cd case 1 40 7 otherprop Objects objects 39 +38 stools stool 1 40 7 stool otherprop Objects stool n04326896 stool.n.01 stool 19 +1326 hand sanitzer dispenser hand sanitzer dispenser 1 40 7 otherprop Objects n04254120 soap_dispenser.n.01 objects 39 +997 teapot teapot 1 40 7 tea pot otherprop Objects n04398044 teapot.n.01 objects 39 +1327 pen holder pen holder 1 40 7 otherprop Objects objects 39 +1328 tray rack tray rack 1 40 7 otherprop Objects objects 39 +1329 wig wig 1 40 7 otherprop Objects n04584207 wig.n.01 objects 39 +182 switch switch 1 40 7 otherprop Objects n04372370 switch.n.01 misc 40 +280 plastic containers plastic container 1 40 7 container otherprop Objects n03094503 container.n.01 objects 39 +1330 night light night light 1 40 7 otherprop Objects lighting 28 +1331 notepad notepad 1 40 7 otherprop Objects objects 39 +1332 mail bin mail bin 1 40 7 otherprop Objects misc 40 +1333 elevator button elevator button 1 40 7 otherprop Objects misc 40 +939 gaming wheel gaming wheel 1 40 7 otherprop Objects objects 39 +1334 drum set drum set 1 40 7 otherprop Objects objects 39 +480 cosmetic bag cosmetic bag 1 37 7 bag bag Objects objects 39 +907 coffee mug coffee mug 1 40 7 vessel 
otherprop Objects cup or mug 3797390 n03063599 coffee_mug.n.01 objects 39 +1335 closet shelf closet shelf 1 15 6 shelves shelves Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +1336 baby mobile baby mobile 1 40 7 otherprop Objects objects 39 +829 diaper bin diaper bin 1 40 7 bin otherprop Objects objects 39 +947 door wall door wall 1 1 12 wall wall Wall wall 1 +1116 stepstool stepstool 1 40 7 step stool otherprop Objects objects 39 +599 paper shredder shredder 1 40 7 otherprop Objects n04210120 shredder.n.01 objects 39 +733 dress rack dress rack 1 40 7 otherprop Objects n03238762 dress_rack.n.01 misc 40 +123 cover cover 1 40 7 blanket otherprop Objects objects 39 +506 shopping bag shopping bag 1 37 7 bag bag Objects n04204081 shopping_bag.n.01 objects 39 +569 sliding door sliding door 1 8 12 door door Wall door n04239074 sliding_door.n.01 door 4 +1337 exercise bike exercise bike 1 40 7 machine otherprop Objects n04210120 shredder.n.01 gym_equipment 33 +1338 recliner chair recliner chair 1 5 4 chair chair Chair chair chair chair 3001627 n03238762 dress_rack.n.01 chair 3 +1314 kitchenaid mixer kitchen mixer 1 40 7 otherprop Objects appliances 37 +1339 soda can soda can 1 40 7 can otherprop Objects can 2946921 n02946921 can.n.01 objects 39 +1340 stovetop stovetop 1 38 7 stove otherstructure Objects stove 4330267 n04330267 stove.n.02 appliances 37 +851 stepladder stepladder 1 39 6 ladder otherfurniture Furniture stairs n04315599 step_ladder.n.01 stairs 16 +142 tap tap 1 40 7 faucet otherprop Objects faucet 3325088 n04559451 water_faucet.n.01 objects 39 +436 cable cable 1 40 7 cables otherprop Objects objects 39 +1341 baby changing station baby changing station 1 39 6 otherfurniture Furniture furniture 36 +1342 costume costume 1 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +885 rocking chair rocking chair 1 5 4 chair chair Chair chair chair chair 3001627 n04099969 rocking_chair.n.01 chair 3 +693 binder binder 1 40 7 binder otherprop Objects objects 39 +815 media center media center 1 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +401 towel rack towel rack 1 40 7 otherprop Objects n04459773 towel_rack.n.01 misc 40 +1343 medal medal 1 40 7 otherprop Objects objects 39 +1184 stack of folded chairs folded chair 1 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +1344 telescope telescope 1 40 7 otherprop Objects n04403638 telescope.n.01 objects 39 +1345 closet doorframe closet doorframe 1 8 12 door door Wall door door 4 +160 glass glass 1 38 7 glass otherstructure Objects n03438257 glass.n.02 misc 40 +1126 baseball cap baseball cap 1 40 7 otherprop Objects cap 2954340 n02799323 baseball_cap.n.01 clothes 38 +1346 battery disposal jar battery disposal jar 1 40 7 jar otherprop Objects jar 3593526 n03593526 jar.n.01 objects 39 +332 mop mop 1 40 7 otherprop Objects n04367480 swab.n.02 objects 39 +397 tank tank 1 40 7 otherprop Objects objects 39 +643 mail tray mail tray 1 40 7 mail tray otherprop Objects objects 39 +551 centerpiece centerpiece 1 40 7 centerpiece otherprop Objects n02994419 centerpiece.n.02 objects 39 +1163 stick stick 1 40 7 stick otherprop Objects objects 39 +1347 closet floor closet floor 1 2 5 floor floor Floor n03365592 floor.n.01 floor 2 +1348 dryer sheets dryer sheets 1 40 7 otherprop Objects objects 39 +803 bycicle bycicle 1 40 7 otherprop Objects misc 40 +484 flower stand flower stand 1 39 6 stand otherfurniture Furniture furniture 36 +1349 air mattress air mattress 1 4 1 
bed bed Bed bed bed bed 2818832 n02690809 air_mattress.n.01 bed 11 +1350 clip clip 1 40 7 otherprop Objects objects 39 +222 side table side table 1 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +1253 pizza boxes pizza box 1 29 7 box box Objects n02883344 box.n.01 objects 39 +1351 display display 1 39 7 otherfurniture Furniture n03211117 display.n.06 misc 40 +1352 postcard postcard 1 40 7 otherprop Objects objects 39 +828 display sign display sign 1 40 7 sign otherprop Objects misc 40 +1353 paper towel paper towel 1 40 7 paper towel otherprop Objects n03887697 paper_towel.n.01 towel 20 +612 boots boot 1 40 7 shoe otherprop Objects n04199027 shoe.n.01 clothes 38 +1354 tennis racket bag tennis racket bag 1 40 7 otherprop Objects objects 39 +1355 air hockey table air hockey table 1 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +1301 socks sock 1 21 7 clothes clothes Objects n04254777 sock.n.01 clothes 38 +1356 food bag food bag 1 37 7 bag bag Objects objects 39 +1199 clothes hangers clothes hanger 1 40 7 otherprop Objects n03057920 coat_hanger.n.01 misc 40 +1357 starbucks cup starbucks cup 1 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 diff --git a/models/LL3DA/data/scannet/meta_data/scannetv2.txt b/models/LL3DA/data/scannet/meta_data/scannetv2.txt new file mode 100644 index 0000000..2c242ef --- /dev/null +++ b/models/LL3DA/data/scannet/meta_data/scannetv2.txt @@ -0,0 +1,1613 @@ +scene0000_00 +scene0000_01 +scene0000_02 +scene0001_00 +scene0001_01 +scene0002_00 +scene0002_01 +scene0003_00 +scene0003_01 +scene0003_02 +scene0004_00 +scene0005_00 +scene0005_01 +scene0006_00 +scene0006_01 +scene0006_02 +scene0007_00 +scene0008_00 +scene0009_00 +scene0009_01 +scene0009_02 +scene0010_00 +scene0010_01 +scene0011_00 +scene0011_01 +scene0012_00 +scene0012_01 +scene0012_02 +scene0013_00 +scene0013_01 +scene0013_02 +scene0014_00 +scene0015_00 +scene0016_00 +scene0016_01 +scene0016_02 +scene0017_00 +scene0017_01 +scene0017_02 +scene0018_00 +scene0019_00 +scene0019_01 +scene0020_00 +scene0020_01 +scene0021_00 +scene0022_00 +scene0022_01 +scene0023_00 +scene0024_00 +scene0024_01 +scene0024_02 +scene0025_00 +scene0025_01 +scene0025_02 +scene0026_00 +scene0027_00 +scene0027_01 +scene0027_02 +scene0028_00 +scene0029_00 +scene0029_01 +scene0029_02 +scene0030_00 +scene0030_01 +scene0030_02 +scene0031_00 +scene0031_01 +scene0031_02 +scene0032_00 +scene0032_01 +scene0033_00 +scene0034_00 +scene0034_01 +scene0034_02 +scene0035_00 +scene0035_01 +scene0036_00 +scene0036_01 +scene0037_00 +scene0038_00 +scene0038_01 +scene0038_02 +scene0039_00 +scene0039_01 +scene0040_00 +scene0040_01 +scene0041_00 +scene0041_01 +scene0042_00 +scene0042_01 +scene0042_02 +scene0043_00 +scene0043_01 +scene0044_00 +scene0044_01 +scene0044_02 +scene0045_00 +scene0045_01 +scene0046_00 +scene0046_01 +scene0046_02 +scene0047_00 +scene0048_00 +scene0048_01 +scene0049_00 +scene0050_00 +scene0050_01 +scene0050_02 +scene0051_00 +scene0051_01 +scene0051_02 +scene0051_03 +scene0052_00 +scene0052_01 +scene0052_02 +scene0053_00 +scene0054_00 +scene0055_00 +scene0055_01 +scene0055_02 +scene0056_00 +scene0056_01 +scene0057_00 +scene0057_01 +scene0058_00 +scene0058_01 +scene0059_00 +scene0059_01 +scene0059_02 +scene0060_00 +scene0060_01 +scene0061_00 +scene0061_01 +scene0062_00 +scene0062_01 +scene0062_02 +scene0063_00 +scene0064_00 +scene0064_01 +scene0065_00 +scene0065_01 +scene0065_02 +scene0066_00 +scene0067_00 +scene0067_01 +scene0067_02 +scene0068_00 
+scene0068_01 +scene0069_00 +scene0070_00 +scene0071_00 +scene0072_00 +scene0072_01 +scene0072_02 +scene0073_00 +scene0073_01 +scene0073_02 +scene0073_03 +scene0074_00 +scene0074_01 +scene0074_02 +scene0075_00 +scene0076_00 +scene0077_00 +scene0077_01 +scene0078_00 +scene0078_01 +scene0078_02 +scene0079_00 +scene0079_01 +scene0080_00 +scene0080_01 +scene0080_02 +scene0081_00 +scene0081_01 +scene0081_02 +scene0082_00 +scene0083_00 +scene0083_01 +scene0084_00 +scene0084_01 +scene0084_02 +scene0085_00 +scene0085_01 +scene0086_00 +scene0086_01 +scene0086_02 +scene0087_00 +scene0087_01 +scene0087_02 +scene0088_00 +scene0088_01 +scene0088_02 +scene0088_03 +scene0089_00 +scene0089_01 +scene0089_02 +scene0090_00 +scene0091_00 +scene0092_00 +scene0092_01 +scene0092_02 +scene0092_03 +scene0092_04 +scene0093_00 +scene0093_01 +scene0093_02 +scene0094_00 +scene0095_00 +scene0095_01 +scene0096_00 +scene0096_01 +scene0096_02 +scene0097_00 +scene0098_00 +scene0098_01 +scene0099_00 +scene0099_01 +scene0100_00 +scene0100_01 +scene0100_02 +scene0101_00 +scene0101_01 +scene0101_02 +scene0101_03 +scene0101_04 +scene0101_05 +scene0102_00 +scene0102_01 +scene0103_00 +scene0103_01 +scene0104_00 +scene0105_00 +scene0105_01 +scene0105_02 +scene0106_00 +scene0106_01 +scene0106_02 +scene0107_00 +scene0108_00 +scene0109_00 +scene0109_01 +scene0110_00 +scene0110_01 +scene0110_02 +scene0111_00 +scene0111_01 +scene0111_02 +scene0112_00 +scene0112_01 +scene0112_02 +scene0113_00 +scene0113_01 +scene0114_00 +scene0114_01 +scene0114_02 +scene0115_00 +scene0115_01 +scene0115_02 +scene0116_00 +scene0116_01 +scene0116_02 +scene0117_00 +scene0118_00 +scene0118_01 +scene0118_02 +scene0119_00 +scene0120_00 +scene0120_01 +scene0121_00 +scene0121_01 +scene0121_02 +scene0122_00 +scene0122_01 +scene0123_00 +scene0123_01 +scene0123_02 +scene0124_00 +scene0124_01 +scene0125_00 +scene0126_00 +scene0126_01 +scene0126_02 +scene0127_00 +scene0127_01 +scene0128_00 +scene0129_00 +scene0130_00 +scene0131_00 +scene0131_01 +scene0131_02 +scene0132_00 +scene0132_01 +scene0132_02 +scene0133_00 +scene0134_00 +scene0134_01 +scene0134_02 +scene0135_00 +scene0136_00 +scene0136_01 +scene0136_02 +scene0137_00 +scene0137_01 +scene0137_02 +scene0138_00 +scene0139_00 +scene0140_00 +scene0140_01 +scene0141_00 +scene0141_01 +scene0141_02 +scene0142_00 +scene0142_01 +scene0143_00 +scene0143_01 +scene0143_02 +scene0144_00 +scene0144_01 +scene0145_00 +scene0146_00 +scene0146_01 +scene0146_02 +scene0147_00 +scene0147_01 +scene0148_00 +scene0149_00 +scene0150_00 +scene0150_01 +scene0150_02 +scene0151_00 +scene0151_01 +scene0152_00 +scene0152_01 +scene0152_02 +scene0153_00 +scene0153_01 +scene0154_00 +scene0155_00 +scene0155_01 +scene0155_02 +scene0156_00 +scene0157_00 +scene0157_01 +scene0158_00 +scene0158_01 +scene0158_02 +scene0159_00 +scene0160_00 +scene0160_01 +scene0160_02 +scene0160_03 +scene0160_04 +scene0161_00 +scene0161_01 +scene0161_02 +scene0162_00 +scene0163_00 +scene0163_01 +scene0164_00 +scene0164_01 +scene0164_02 +scene0164_03 +scene0165_00 +scene0165_01 +scene0165_02 +scene0166_00 +scene0166_01 +scene0166_02 +scene0167_00 +scene0168_00 +scene0168_01 +scene0168_02 +scene0169_00 +scene0169_01 +scene0170_00 +scene0170_01 +scene0170_02 +scene0171_00 +scene0171_01 +scene0172_00 +scene0172_01 +scene0173_00 +scene0173_01 +scene0173_02 +scene0174_00 +scene0174_01 +scene0175_00 +scene0176_00 +scene0177_00 +scene0177_01 +scene0177_02 +scene0178_00 +scene0179_00 +scene0180_00 +scene0181_00 +scene0181_01 +scene0181_02 +scene0181_03 +scene0182_00 
+scene0182_01 +scene0182_02 +scene0183_00 +scene0184_00 +scene0185_00 +scene0186_00 +scene0186_01 +scene0187_00 +scene0187_01 +scene0188_00 +scene0189_00 +scene0190_00 +scene0191_00 +scene0191_01 +scene0191_02 +scene0192_00 +scene0192_01 +scene0192_02 +scene0193_00 +scene0193_01 +scene0194_00 +scene0195_00 +scene0195_01 +scene0195_02 +scene0196_00 +scene0197_00 +scene0197_01 +scene0197_02 +scene0198_00 +scene0199_00 +scene0200_00 +scene0200_01 +scene0200_02 +scene0201_00 +scene0201_01 +scene0201_02 +scene0202_00 +scene0203_00 +scene0203_01 +scene0203_02 +scene0204_00 +scene0204_01 +scene0204_02 +scene0205_00 +scene0205_01 +scene0205_02 +scene0206_00 +scene0206_01 +scene0206_02 +scene0207_00 +scene0207_01 +scene0207_02 +scene0208_00 +scene0209_00 +scene0209_01 +scene0209_02 +scene0210_00 +scene0210_01 +scene0211_00 +scene0211_01 +scene0211_02 +scene0211_03 +scene0212_00 +scene0212_01 +scene0212_02 +scene0213_00 +scene0214_00 +scene0214_01 +scene0214_02 +scene0215_00 +scene0215_01 +scene0216_00 +scene0217_00 +scene0218_00 +scene0218_01 +scene0219_00 +scene0220_00 +scene0220_01 +scene0220_02 +scene0221_00 +scene0221_01 +scene0222_00 +scene0222_01 +scene0223_00 +scene0223_01 +scene0223_02 +scene0224_00 +scene0225_00 +scene0226_00 +scene0226_01 +scene0227_00 +scene0228_00 +scene0229_00 +scene0229_01 +scene0229_02 +scene0230_00 +scene0231_00 +scene0231_01 +scene0231_02 +scene0232_00 +scene0232_01 +scene0232_02 +scene0233_00 +scene0233_01 +scene0234_00 +scene0235_00 +scene0236_00 +scene0236_01 +scene0237_00 +scene0237_01 +scene0238_00 +scene0238_01 +scene0239_00 +scene0239_01 +scene0239_02 +scene0240_00 +scene0241_00 +scene0241_01 +scene0241_02 +scene0242_00 +scene0242_01 +scene0242_02 +scene0243_00 +scene0244_00 +scene0244_01 +scene0245_00 +scene0246_00 +scene0247_00 +scene0247_01 +scene0248_00 +scene0248_01 +scene0248_02 +scene0249_00 +scene0250_00 +scene0250_01 +scene0250_02 +scene0251_00 +scene0252_00 +scene0253_00 +scene0254_00 +scene0254_01 +scene0255_00 +scene0255_01 +scene0255_02 +scene0256_00 +scene0256_01 +scene0256_02 +scene0257_00 +scene0258_00 +scene0259_00 +scene0259_01 +scene0260_00 +scene0260_01 +scene0260_02 +scene0261_00 +scene0261_01 +scene0261_02 +scene0261_03 +scene0262_00 +scene0262_01 +scene0263_00 +scene0263_01 +scene0264_00 +scene0264_01 +scene0264_02 +scene0265_00 +scene0265_01 +scene0265_02 +scene0266_00 +scene0266_01 +scene0267_00 +scene0268_00 +scene0268_01 +scene0268_02 +scene0269_00 +scene0269_01 +scene0269_02 +scene0270_00 +scene0270_01 +scene0270_02 +scene0271_00 +scene0271_01 +scene0272_00 +scene0272_01 +scene0273_00 +scene0273_01 +scene0274_00 +scene0274_01 +scene0274_02 +scene0275_00 +scene0276_00 +scene0276_01 +scene0277_00 +scene0277_01 +scene0277_02 +scene0278_00 +scene0278_01 +scene0279_00 +scene0279_01 +scene0279_02 +scene0280_00 +scene0280_01 +scene0280_02 +scene0281_00 +scene0282_00 +scene0282_01 +scene0282_02 +scene0283_00 +scene0284_00 +scene0285_00 +scene0286_00 +scene0286_01 +scene0286_02 +scene0286_03 +scene0287_00 +scene0288_00 +scene0288_01 +scene0288_02 +scene0289_00 +scene0289_01 +scene0290_00 +scene0291_00 +scene0291_01 +scene0291_02 +scene0292_00 +scene0292_01 +scene0293_00 +scene0293_01 +scene0294_00 +scene0294_01 +scene0294_02 +scene0295_00 +scene0295_01 +scene0296_00 +scene0296_01 +scene0297_00 +scene0297_01 +scene0297_02 +scene0298_00 +scene0299_00 +scene0299_01 +scene0300_00 +scene0300_01 +scene0301_00 +scene0301_01 +scene0301_02 +scene0302_00 +scene0302_01 +scene0303_00 +scene0303_01 +scene0303_02 +scene0304_00 +scene0305_00 
+scene0305_01 +scene0306_00 +scene0306_01 +scene0307_00 +scene0307_01 +scene0307_02 +scene0308_00 +scene0309_00 +scene0309_01 +scene0310_00 +scene0310_01 +scene0310_02 +scene0311_00 +scene0312_00 +scene0312_01 +scene0312_02 +scene0313_00 +scene0313_01 +scene0313_02 +scene0314_00 +scene0315_00 +scene0316_00 +scene0317_00 +scene0317_01 +scene0318_00 +scene0319_00 +scene0320_00 +scene0320_01 +scene0320_02 +scene0320_03 +scene0321_00 +scene0322_00 +scene0323_00 +scene0323_01 +scene0324_00 +scene0324_01 +scene0325_00 +scene0325_01 +scene0326_00 +scene0327_00 +scene0328_00 +scene0329_00 +scene0329_01 +scene0329_02 +scene0330_00 +scene0331_00 +scene0331_01 +scene0332_00 +scene0332_01 +scene0332_02 +scene0333_00 +scene0334_00 +scene0334_01 +scene0334_02 +scene0335_00 +scene0335_01 +scene0335_02 +scene0336_00 +scene0336_01 +scene0337_00 +scene0337_01 +scene0337_02 +scene0338_00 +scene0338_01 +scene0338_02 +scene0339_00 +scene0340_00 +scene0340_01 +scene0340_02 +scene0341_00 +scene0341_01 +scene0342_00 +scene0343_00 +scene0344_00 +scene0344_01 +scene0345_00 +scene0345_01 +scene0346_00 +scene0346_01 +scene0347_00 +scene0347_01 +scene0347_02 +scene0348_00 +scene0348_01 +scene0348_02 +scene0349_00 +scene0349_01 +scene0350_00 +scene0350_01 +scene0350_02 +scene0351_00 +scene0351_01 +scene0352_00 +scene0352_01 +scene0352_02 +scene0353_00 +scene0353_01 +scene0353_02 +scene0354_00 +scene0355_00 +scene0355_01 +scene0356_00 +scene0356_01 +scene0356_02 +scene0357_00 +scene0357_01 +scene0358_00 +scene0358_01 +scene0358_02 +scene0359_00 +scene0359_01 +scene0360_00 +scene0361_00 +scene0361_01 +scene0361_02 +scene0362_00 +scene0362_01 +scene0362_02 +scene0362_03 +scene0363_00 +scene0364_00 +scene0364_01 +scene0365_00 +scene0365_01 +scene0365_02 +scene0366_00 +scene0367_00 +scene0367_01 +scene0368_00 +scene0368_01 +scene0369_00 +scene0369_01 +scene0369_02 +scene0370_00 +scene0370_01 +scene0370_02 +scene0371_00 +scene0371_01 +scene0372_00 +scene0373_00 +scene0373_01 +scene0374_00 +scene0375_00 +scene0375_01 +scene0375_02 +scene0376_00 +scene0376_01 +scene0376_02 +scene0377_00 +scene0377_01 +scene0377_02 +scene0378_00 +scene0378_01 +scene0378_02 +scene0379_00 +scene0380_00 +scene0380_01 +scene0380_02 +scene0381_00 +scene0381_01 +scene0381_02 +scene0382_00 +scene0382_01 +scene0383_00 +scene0383_01 +scene0383_02 +scene0384_00 +scene0385_00 +scene0385_01 +scene0385_02 +scene0386_00 +scene0387_00 +scene0387_01 +scene0387_02 +scene0388_00 +scene0388_01 +scene0389_00 +scene0390_00 +scene0391_00 +scene0392_00 +scene0392_01 +scene0392_02 +scene0393_00 +scene0393_01 +scene0393_02 +scene0394_00 +scene0394_01 +scene0395_00 +scene0395_01 +scene0395_02 +scene0396_00 +scene0396_01 +scene0396_02 +scene0397_00 +scene0397_01 +scene0398_00 +scene0398_01 +scene0399_00 +scene0399_01 +scene0400_00 +scene0400_01 +scene0401_00 +scene0402_00 +scene0403_00 +scene0403_01 +scene0404_00 +scene0404_01 +scene0404_02 +scene0405_00 +scene0406_00 +scene0406_01 +scene0406_02 +scene0407_00 +scene0407_01 +scene0408_00 +scene0408_01 +scene0409_00 +scene0409_01 +scene0410_00 +scene0410_01 +scene0411_00 +scene0411_01 +scene0411_02 +scene0412_00 +scene0412_01 +scene0413_00 +scene0414_00 +scene0415_00 +scene0415_01 +scene0415_02 +scene0416_00 +scene0416_01 +scene0416_02 +scene0416_03 +scene0416_04 +scene0417_00 +scene0418_00 +scene0418_01 +scene0418_02 +scene0419_00 +scene0419_01 +scene0419_02 +scene0420_00 +scene0420_01 +scene0420_02 +scene0421_00 +scene0421_01 +scene0421_02 +scene0422_00 +scene0423_00 +scene0423_01 +scene0423_02 +scene0424_00 
+scene0424_01 +scene0424_02 +scene0425_00 +scene0425_01 +scene0426_00 +scene0426_01 +scene0426_02 +scene0426_03 +scene0427_00 +scene0428_00 +scene0428_01 +scene0429_00 +scene0430_00 +scene0430_01 +scene0431_00 +scene0432_00 +scene0432_01 +scene0433_00 +scene0434_00 +scene0434_01 +scene0434_02 +scene0435_00 +scene0435_01 +scene0435_02 +scene0435_03 +scene0436_00 +scene0437_00 +scene0437_01 +scene0438_00 +scene0439_00 +scene0439_01 +scene0440_00 +scene0440_01 +scene0440_02 +scene0441_00 +scene0442_00 +scene0443_00 +scene0444_00 +scene0444_01 +scene0445_00 +scene0445_01 +scene0446_00 +scene0446_01 +scene0447_00 +scene0447_01 +scene0447_02 +scene0448_00 +scene0448_01 +scene0448_02 +scene0449_00 +scene0449_01 +scene0449_02 +scene0450_00 +scene0451_00 +scene0451_01 +scene0451_02 +scene0451_03 +scene0451_04 +scene0451_05 +scene0452_00 +scene0452_01 +scene0452_02 +scene0453_00 +scene0453_01 +scene0454_00 +scene0455_00 +scene0456_00 +scene0456_01 +scene0457_00 +scene0457_01 +scene0457_02 +scene0458_00 +scene0458_01 +scene0459_00 +scene0459_01 +scene0460_00 +scene0461_00 +scene0462_00 +scene0463_00 +scene0463_01 +scene0464_00 +scene0465_00 +scene0465_01 +scene0466_00 +scene0466_01 +scene0467_00 +scene0468_00 +scene0468_01 +scene0468_02 +scene0469_00 +scene0469_01 +scene0469_02 +scene0470_00 +scene0470_01 +scene0471_00 +scene0471_01 +scene0471_02 +scene0472_00 +scene0472_01 +scene0472_02 +scene0473_00 +scene0473_01 +scene0474_00 +scene0474_01 +scene0474_02 +scene0474_03 +scene0474_04 +scene0474_05 +scene0475_00 +scene0475_01 +scene0475_02 +scene0476_00 +scene0476_01 +scene0476_02 +scene0477_00 +scene0477_01 +scene0478_00 +scene0478_01 +scene0479_00 +scene0479_01 +scene0479_02 +scene0480_00 +scene0480_01 +scene0481_00 +scene0481_01 +scene0482_00 +scene0482_01 +scene0483_00 +scene0484_00 +scene0484_01 +scene0485_00 +scene0486_00 +scene0487_00 +scene0487_01 +scene0488_00 +scene0488_01 +scene0489_00 +scene0489_01 +scene0489_02 +scene0490_00 +scene0491_00 +scene0492_00 +scene0492_01 +scene0493_00 +scene0493_01 +scene0494_00 +scene0495_00 +scene0496_00 +scene0497_00 +scene0498_00 +scene0498_01 +scene0498_02 +scene0499_00 +scene0500_00 +scene0500_01 +scene0501_00 +scene0501_01 +scene0501_02 +scene0502_00 +scene0502_01 +scene0502_02 +scene0503_00 +scene0504_00 +scene0505_00 +scene0505_01 +scene0505_02 +scene0505_03 +scene0505_04 +scene0506_00 +scene0507_00 +scene0508_00 +scene0508_01 +scene0508_02 +scene0509_00 +scene0509_01 +scene0509_02 +scene0510_00 +scene0510_01 +scene0510_02 +scene0511_00 +scene0511_01 +scene0512_00 +scene0513_00 +scene0514_00 +scene0514_01 +scene0515_00 +scene0515_01 +scene0515_02 +scene0516_00 +scene0516_01 +scene0517_00 +scene0517_01 +scene0517_02 +scene0518_00 +scene0519_00 +scene0520_00 +scene0520_01 +scene0521_00 +scene0522_00 +scene0523_00 +scene0523_01 +scene0523_02 +scene0524_00 +scene0524_01 +scene0525_00 +scene0525_01 +scene0525_02 +scene0526_00 +scene0526_01 +scene0527_00 +scene0528_00 +scene0528_01 +scene0529_00 +scene0529_01 +scene0529_02 +scene0530_00 +scene0531_00 +scene0532_00 +scene0532_01 +scene0533_00 +scene0533_01 +scene0534_00 +scene0534_01 +scene0535_00 +scene0536_00 +scene0536_01 +scene0536_02 +scene0537_00 +scene0538_00 +scene0539_00 +scene0539_01 +scene0539_02 +scene0540_00 +scene0540_01 +scene0540_02 +scene0541_00 +scene0541_01 +scene0541_02 +scene0542_00 +scene0543_00 +scene0543_01 +scene0543_02 +scene0544_00 +scene0545_00 +scene0545_01 +scene0545_02 +scene0546_00 +scene0547_00 +scene0547_01 +scene0547_02 +scene0548_00 +scene0548_01 +scene0548_02 
+scene0549_00 +scene0549_01 +scene0550_00 +scene0551_00 +scene0552_00 +scene0552_01 +scene0553_00 +scene0553_01 +scene0553_02 +scene0554_00 +scene0554_01 +scene0555_00 +scene0556_00 +scene0556_01 +scene0557_00 +scene0557_01 +scene0557_02 +scene0558_00 +scene0558_01 +scene0558_02 +scene0559_00 +scene0559_01 +scene0559_02 +scene0560_00 +scene0561_00 +scene0561_01 +scene0562_00 +scene0563_00 +scene0564_00 +scene0565_00 +scene0566_00 +scene0567_00 +scene0567_01 +scene0568_00 +scene0568_01 +scene0568_02 +scene0569_00 +scene0569_01 +scene0570_00 +scene0570_01 +scene0570_02 +scene0571_00 +scene0571_01 +scene0572_00 +scene0572_01 +scene0572_02 +scene0573_00 +scene0573_01 +scene0574_00 +scene0574_01 +scene0574_02 +scene0575_00 +scene0575_01 +scene0575_02 +scene0576_00 +scene0576_01 +scene0576_02 +scene0577_00 +scene0578_00 +scene0578_01 +scene0578_02 +scene0579_00 +scene0579_01 +scene0579_02 +scene0580_00 +scene0580_01 +scene0581_00 +scene0581_01 +scene0581_02 +scene0582_00 +scene0582_01 +scene0582_02 +scene0583_00 +scene0583_01 +scene0583_02 +scene0584_00 +scene0584_01 +scene0584_02 +scene0585_00 +scene0585_01 +scene0586_00 +scene0586_01 +scene0586_02 +scene0587_00 +scene0587_01 +scene0587_02 +scene0587_03 +scene0588_00 +scene0588_01 +scene0588_02 +scene0588_03 +scene0589_00 +scene0589_01 +scene0589_02 +scene0590_00 +scene0590_01 +scene0591_00 +scene0591_01 +scene0591_02 +scene0592_00 +scene0592_01 +scene0593_00 +scene0593_01 +scene0594_00 +scene0595_00 +scene0596_00 +scene0596_01 +scene0596_02 +scene0597_00 +scene0597_01 +scene0597_02 +scene0598_00 +scene0598_01 +scene0598_02 +scene0599_00 +scene0599_01 +scene0599_02 +scene0600_00 +scene0600_01 +scene0600_02 +scene0601_00 +scene0601_01 +scene0602_00 +scene0603_00 +scene0603_01 +scene0604_00 +scene0604_01 +scene0604_02 +scene0605_00 +scene0605_01 +scene0606_00 +scene0606_01 +scene0606_02 +scene0607_00 +scene0607_01 +scene0608_00 +scene0608_01 +scene0608_02 +scene0609_00 +scene0609_01 +scene0609_02 +scene0609_03 +scene0610_00 +scene0610_01 +scene0610_02 +scene0611_00 +scene0611_01 +scene0612_00 +scene0612_01 +scene0613_00 +scene0613_01 +scene0613_02 +scene0614_00 +scene0614_01 +scene0614_02 +scene0615_00 +scene0615_01 +scene0616_00 +scene0616_01 +scene0617_00 +scene0618_00 +scene0619_00 +scene0620_00 +scene0620_01 +scene0621_00 +scene0622_00 +scene0622_01 +scene0623_00 +scene0623_01 +scene0624_00 +scene0625_00 +scene0625_01 +scene0626_00 +scene0626_01 +scene0626_02 +scene0627_00 +scene0627_01 +scene0628_00 +scene0628_01 +scene0628_02 +scene0629_00 +scene0629_01 +scene0629_02 +scene0630_00 +scene0630_01 +scene0630_02 +scene0630_03 +scene0630_04 +scene0630_05 +scene0630_06 +scene0631_00 +scene0631_01 +scene0631_02 +scene0632_00 +scene0633_00 +scene0633_01 +scene0634_00 +scene0635_00 +scene0635_01 +scene0636_00 +scene0637_00 +scene0638_00 +scene0639_00 +scene0640_00 +scene0640_01 +scene0640_02 +scene0641_00 +scene0642_00 +scene0642_01 +scene0642_02 +scene0642_03 +scene0643_00 +scene0644_00 +scene0645_00 +scene0645_01 +scene0645_02 +scene0646_00 +scene0646_01 +scene0646_02 +scene0647_00 +scene0647_01 +scene0648_00 +scene0648_01 +scene0649_00 +scene0649_01 +scene0650_00 +scene0651_00 +scene0651_01 +scene0651_02 +scene0652_00 +scene0653_00 +scene0653_01 +scene0654_00 +scene0654_01 +scene0655_00 +scene0655_01 +scene0655_02 +scene0656_00 +scene0656_01 +scene0656_02 +scene0656_03 +scene0657_00 +scene0658_00 +scene0659_00 +scene0659_01 +scene0660_00 +scene0661_00 +scene0662_00 +scene0662_01 +scene0662_02 +scene0663_00 +scene0663_01 +scene0663_02 
+scene0664_00 +scene0664_01 +scene0664_02 +scene0665_00 +scene0665_01 +scene0666_00 +scene0666_01 +scene0666_02 +scene0667_00 +scene0667_01 +scene0667_02 +scene0668_00 +scene0669_00 +scene0669_01 +scene0670_00 +scene0670_01 +scene0671_00 +scene0671_01 +scene0672_00 +scene0672_01 +scene0673_00 +scene0673_01 +scene0673_02 +scene0673_03 +scene0673_04 +scene0673_05 +scene0674_00 +scene0674_01 +scene0675_00 +scene0675_01 +scene0676_00 +scene0676_01 +scene0677_00 +scene0677_01 +scene0677_02 +scene0678_00 +scene0678_01 +scene0678_02 +scene0679_00 +scene0679_01 +scene0680_00 +scene0680_01 +scene0681_00 +scene0682_00 +scene0683_00 +scene0684_00 +scene0684_01 +scene0685_00 +scene0685_01 +scene0685_02 +scene0686_00 +scene0686_01 +scene0686_02 +scene0687_00 +scene0688_00 +scene0689_00 +scene0690_00 +scene0690_01 +scene0691_00 +scene0691_01 +scene0692_00 +scene0692_01 +scene0692_02 +scene0692_03 +scene0692_04 +scene0693_00 +scene0693_01 +scene0693_02 +scene0694_00 +scene0694_01 +scene0695_00 +scene0695_01 +scene0695_02 +scene0695_03 +scene0696_00 +scene0696_01 +scene0696_02 +scene0697_00 +scene0697_01 +scene0697_02 +scene0697_03 +scene0698_00 +scene0698_01 +scene0699_00 +scene0700_00 +scene0700_01 +scene0700_02 +scene0701_00 +scene0701_01 +scene0701_02 +scene0702_00 +scene0702_01 +scene0702_02 +scene0703_00 +scene0703_01 +scene0704_00 +scene0704_01 +scene0705_00 +scene0705_01 +scene0705_02 +scene0706_00 +scene0707_00 +scene0708_00 +scene0709_00 +scene0710_00 +scene0711_00 +scene0712_00 +scene0713_00 +scene0714_00 +scene0715_00 +scene0716_00 +scene0717_00 +scene0718_00 +scene0719_00 +scene0720_00 +scene0721_00 +scene0722_00 +scene0723_00 +scene0724_00 +scene0725_00 +scene0726_00 +scene0727_00 +scene0728_00 +scene0729_00 +scene0730_00 +scene0731_00 +scene0732_00 +scene0733_00 +scene0734_00 +scene0735_00 +scene0736_00 +scene0737_00 +scene0738_00 +scene0739_00 +scene0740_00 +scene0741_00 +scene0742_00 +scene0743_00 +scene0744_00 +scene0745_00 +scene0746_00 +scene0747_00 +scene0748_00 +scene0749_00 +scene0750_00 +scene0751_00 +scene0752_00 +scene0753_00 +scene0754_00 +scene0755_00 +scene0756_00 +scene0757_00 +scene0758_00 +scene0759_00 +scene0760_00 +scene0761_00 +scene0762_00 +scene0763_00 +scene0764_00 +scene0765_00 +scene0766_00 +scene0767_00 +scene0768_00 +scene0769_00 +scene0770_00 +scene0771_00 +scene0772_00 +scene0773_00 +scene0774_00 +scene0775_00 +scene0776_00 +scene0777_00 +scene0778_00 +scene0779_00 +scene0780_00 +scene0781_00 +scene0782_00 +scene0783_00 +scene0784_00 +scene0785_00 +scene0786_00 +scene0787_00 +scene0788_00 +scene0789_00 +scene0790_00 +scene0791_00 +scene0792_00 +scene0793_00 +scene0794_00 +scene0795_00 +scene0796_00 +scene0797_00 +scene0798_00 +scene0799_00 +scene0800_00 +scene0801_00 +scene0802_00 +scene0803_00 +scene0804_00 +scene0805_00 +scene0806_00 diff --git a/models/LL3DA/data/scannet/meta_data/scannetv2_test.txt b/models/LL3DA/data/scannet/meta_data/scannetv2_test.txt new file mode 100644 index 0000000..79d15b0 --- /dev/null +++ b/models/LL3DA/data/scannet/meta_data/scannetv2_test.txt @@ -0,0 +1,100 @@ +scene0707_00 +scene0708_00 +scene0709_00 +scene0710_00 +scene0711_00 +scene0712_00 +scene0713_00 +scene0714_00 +scene0715_00 +scene0716_00 +scene0717_00 +scene0718_00 +scene0719_00 +scene0720_00 +scene0721_00 +scene0722_00 +scene0723_00 +scene0724_00 +scene0725_00 +scene0726_00 +scene0727_00 +scene0728_00 +scene0729_00 +scene0730_00 +scene0731_00 +scene0732_00 +scene0733_00 +scene0734_00 +scene0735_00 +scene0736_00 +scene0737_00 +scene0738_00 +scene0739_00 +scene0740_00 
+scene0741_00 +scene0742_00 +scene0743_00 +scene0744_00 +scene0745_00 +scene0746_00 +scene0747_00 +scene0748_00 +scene0749_00 +scene0750_00 +scene0751_00 +scene0752_00 +scene0753_00 +scene0754_00 +scene0755_00 +scene0756_00 +scene0757_00 +scene0758_00 +scene0759_00 +scene0760_00 +scene0761_00 +scene0762_00 +scene0763_00 +scene0764_00 +scene0765_00 +scene0766_00 +scene0767_00 +scene0768_00 +scene0769_00 +scene0770_00 +scene0771_00 +scene0772_00 +scene0773_00 +scene0774_00 +scene0775_00 +scene0776_00 +scene0777_00 +scene0778_00 +scene0779_00 +scene0780_00 +scene0781_00 +scene0782_00 +scene0783_00 +scene0784_00 +scene0785_00 +scene0786_00 +scene0787_00 +scene0788_00 +scene0789_00 +scene0790_00 +scene0791_00 +scene0792_00 +scene0793_00 +scene0794_00 +scene0795_00 +scene0796_00 +scene0797_00 +scene0798_00 +scene0799_00 +scene0800_00 +scene0801_00 +scene0802_00 +scene0803_00 +scene0804_00 +scene0805_00 +scene0806_00 diff --git a/models/LL3DA/data/scannet/meta_data/scannetv2_train.txt b/models/LL3DA/data/scannet/meta_data/scannetv2_train.txt new file mode 100644 index 0000000..8c75dc7 --- /dev/null +++ b/models/LL3DA/data/scannet/meta_data/scannetv2_train.txt @@ -0,0 +1,1201 @@ +scene0000_00 +scene0000_01 +scene0000_02 +scene0001_00 +scene0001_01 +scene0002_00 +scene0002_01 +scene0003_00 +scene0003_01 +scene0003_02 +scene0004_00 +scene0005_00 +scene0005_01 +scene0006_00 +scene0006_01 +scene0006_02 +scene0007_00 +scene0008_00 +scene0009_00 +scene0009_01 +scene0009_02 +scene0010_00 +scene0010_01 +scene0012_00 +scene0012_01 +scene0012_02 +scene0013_00 +scene0013_01 +scene0013_02 +scene0014_00 +scene0016_00 +scene0016_01 +scene0016_02 +scene0017_00 +scene0017_01 +scene0017_02 +scene0018_00 +scene0020_00 +scene0020_01 +scene0021_00 +scene0022_00 +scene0022_01 +scene0023_00 +scene0024_00 +scene0024_01 +scene0024_02 +scene0026_00 +scene0027_00 +scene0027_01 +scene0027_02 +scene0028_00 +scene0029_00 +scene0029_01 +scene0029_02 +scene0031_00 +scene0031_01 +scene0031_02 +scene0032_00 +scene0032_01 +scene0033_00 +scene0034_00 +scene0034_01 +scene0034_02 +scene0035_00 +scene0035_01 +scene0036_00 +scene0036_01 +scene0037_00 +scene0038_00 +scene0038_01 +scene0038_02 +scene0039_00 +scene0039_01 +scene0040_00 +scene0040_01 +scene0041_00 +scene0041_01 +scene0042_00 +scene0042_01 +scene0042_02 +scene0043_00 +scene0043_01 +scene0044_00 +scene0044_01 +scene0044_02 +scene0045_00 +scene0045_01 +scene0047_00 +scene0048_00 +scene0048_01 +scene0049_00 +scene0051_00 +scene0051_01 +scene0051_02 +scene0051_03 +scene0052_00 +scene0052_01 +scene0052_02 +scene0053_00 +scene0054_00 +scene0055_00 +scene0055_01 +scene0055_02 +scene0056_00 +scene0056_01 +scene0057_00 +scene0057_01 +scene0058_00 +scene0058_01 +scene0059_00 +scene0059_01 +scene0059_02 +scene0060_00 +scene0060_01 +scene0061_00 +scene0061_01 +scene0062_00 +scene0062_01 +scene0062_02 +scene0065_00 +scene0065_01 +scene0065_02 +scene0066_00 +scene0067_00 +scene0067_01 +scene0067_02 +scene0068_00 +scene0068_01 +scene0069_00 +scene0070_00 +scene0071_00 +scene0072_00 +scene0072_01 +scene0072_02 +scene0073_00 +scene0073_01 +scene0073_02 +scene0073_03 +scene0074_00 +scene0074_01 +scene0074_02 +scene0075_00 +scene0076_00 +scene0078_00 +scene0078_01 +scene0078_02 +scene0079_00 +scene0079_01 +scene0080_00 +scene0080_01 +scene0080_02 +scene0082_00 +scene0083_00 +scene0083_01 +scene0085_00 +scene0085_01 +scene0087_00 +scene0087_01 +scene0087_02 +scene0089_00 +scene0089_01 +scene0089_02 +scene0090_00 +scene0091_00 +scene0092_00 +scene0092_01 +scene0092_02 +scene0092_03 
+scene0092_04 +scene0093_00 +scene0093_01 +scene0093_02 +scene0094_00 +scene0096_00 +scene0096_01 +scene0096_02 +scene0097_00 +scene0098_00 +scene0098_01 +scene0099_00 +scene0099_01 +scene0101_00 +scene0101_01 +scene0101_02 +scene0101_03 +scene0101_04 +scene0101_05 +scene0102_00 +scene0102_01 +scene0103_00 +scene0103_01 +scene0104_00 +scene0105_00 +scene0105_01 +scene0105_02 +scene0106_00 +scene0106_01 +scene0106_02 +scene0107_00 +scene0108_00 +scene0109_00 +scene0109_01 +scene0110_00 +scene0110_01 +scene0110_02 +scene0111_00 +scene0111_01 +scene0111_02 +scene0112_00 +scene0112_01 +scene0112_02 +scene0113_00 +scene0113_01 +scene0114_00 +scene0114_01 +scene0114_02 +scene0115_00 +scene0115_01 +scene0115_02 +scene0116_00 +scene0116_01 +scene0116_02 +scene0117_00 +scene0118_00 +scene0118_01 +scene0118_02 +scene0119_00 +scene0120_00 +scene0120_01 +scene0121_00 +scene0121_01 +scene0121_02 +scene0122_00 +scene0122_01 +scene0123_00 +scene0123_01 +scene0123_02 +scene0124_00 +scene0124_01 +scene0125_00 +scene0126_00 +scene0126_01 +scene0126_02 +scene0127_00 +scene0127_01 +scene0128_00 +scene0129_00 +scene0130_00 +scene0132_00 +scene0132_01 +scene0132_02 +scene0133_00 +scene0134_00 +scene0134_01 +scene0134_02 +scene0135_00 +scene0136_00 +scene0136_01 +scene0136_02 +scene0137_00 +scene0137_01 +scene0137_02 +scene0138_00 +scene0140_00 +scene0140_01 +scene0141_00 +scene0141_01 +scene0141_02 +scene0142_00 +scene0142_01 +scene0143_00 +scene0143_01 +scene0143_02 +scene0145_00 +scene0147_00 +scene0147_01 +scene0148_00 +scene0150_00 +scene0150_01 +scene0150_02 +scene0151_00 +scene0151_01 +scene0152_00 +scene0152_01 +scene0152_02 +scene0154_00 +scene0155_00 +scene0155_01 +scene0155_02 +scene0156_00 +scene0157_00 +scene0157_01 +scene0158_00 +scene0158_01 +scene0158_02 +scene0159_00 +scene0160_00 +scene0160_01 +scene0160_02 +scene0160_03 +scene0160_04 +scene0161_00 +scene0161_01 +scene0161_02 +scene0162_00 +scene0163_00 +scene0163_01 +scene0165_00 +scene0165_01 +scene0165_02 +scene0166_00 +scene0166_01 +scene0166_02 +scene0167_00 +scene0168_00 +scene0168_01 +scene0168_02 +scene0170_00 +scene0170_01 +scene0170_02 +scene0171_00 +scene0171_01 +scene0172_00 +scene0172_01 +scene0173_00 +scene0173_01 +scene0173_02 +scene0174_00 +scene0174_01 +scene0175_00 +scene0176_00 +scene0177_00 +scene0177_01 +scene0177_02 +scene0178_00 +scene0179_00 +scene0180_00 +scene0181_00 +scene0181_01 +scene0181_02 +scene0181_03 +scene0182_00 +scene0182_01 +scene0182_02 +scene0183_00 +scene0184_00 +scene0185_00 +scene0186_00 +scene0186_01 +scene0188_00 +scene0189_00 +scene0190_00 +scene0191_00 +scene0191_01 +scene0191_02 +scene0192_00 +scene0192_01 +scene0192_02 +scene0194_00 +scene0195_00 +scene0195_01 +scene0195_02 +scene0197_00 +scene0197_01 +scene0197_02 +scene0198_00 +scene0199_00 +scene0200_00 +scene0200_01 +scene0200_02 +scene0201_00 +scene0201_01 +scene0201_02 +scene0202_00 +scene0204_00 +scene0204_01 +scene0204_02 +scene0205_00 +scene0205_01 +scene0205_02 +scene0206_00 +scene0206_01 +scene0206_02 +scene0209_00 +scene0209_01 +scene0209_02 +scene0210_00 +scene0210_01 +scene0211_00 +scene0211_01 +scene0211_02 +scene0211_03 +scene0212_00 +scene0212_01 +scene0212_02 +scene0213_00 +scene0214_00 +scene0214_01 +scene0214_02 +scene0215_00 +scene0215_01 +scene0216_00 +scene0218_00 +scene0218_01 +scene0219_00 +scene0220_00 +scene0220_01 +scene0220_02 +scene0223_00 +scene0223_01 +scene0223_02 +scene0224_00 +scene0225_00 +scene0226_00 +scene0226_01 +scene0227_00 +scene0228_00 +scene0229_00 +scene0229_01 +scene0229_02 +scene0230_00 
+scene0232_00 +scene0232_01 +scene0232_02 +scene0233_00 +scene0233_01 +scene0234_00 +scene0235_00 +scene0236_00 +scene0236_01 +scene0237_00 +scene0237_01 +scene0238_00 +scene0238_01 +scene0239_00 +scene0239_01 +scene0239_02 +scene0240_00 +scene0241_00 +scene0241_01 +scene0241_02 +scene0242_00 +scene0242_01 +scene0242_02 +scene0243_00 +scene0244_00 +scene0244_01 +scene0245_00 +scene0247_00 +scene0247_01 +scene0248_00 +scene0248_01 +scene0248_02 +scene0250_00 +scene0250_01 +scene0250_02 +scene0252_00 +scene0253_00 +scene0254_00 +scene0254_01 +scene0255_00 +scene0255_01 +scene0255_02 +scene0258_00 +scene0259_00 +scene0259_01 +scene0260_00 +scene0260_01 +scene0260_02 +scene0261_00 +scene0261_01 +scene0261_02 +scene0261_03 +scene0262_00 +scene0262_01 +scene0263_00 +scene0263_01 +scene0264_00 +scene0264_01 +scene0264_02 +scene0265_00 +scene0265_01 +scene0265_02 +scene0266_00 +scene0266_01 +scene0267_00 +scene0268_00 +scene0268_01 +scene0268_02 +scene0269_00 +scene0269_01 +scene0269_02 +scene0270_00 +scene0270_01 +scene0270_02 +scene0271_00 +scene0271_01 +scene0272_00 +scene0272_01 +scene0273_00 +scene0273_01 +scene0274_00 +scene0274_01 +scene0274_02 +scene0275_00 +scene0276_00 +scene0276_01 +scene0279_00 +scene0279_01 +scene0279_02 +scene0280_00 +scene0280_01 +scene0280_02 +scene0281_00 +scene0282_00 +scene0282_01 +scene0282_02 +scene0283_00 +scene0284_00 +scene0285_00 +scene0286_00 +scene0286_01 +scene0286_02 +scene0286_03 +scene0287_00 +scene0288_00 +scene0288_01 +scene0288_02 +scene0289_00 +scene0289_01 +scene0290_00 +scene0291_00 +scene0291_01 +scene0291_02 +scene0292_00 +scene0292_01 +scene0293_00 +scene0293_01 +scene0294_00 +scene0294_01 +scene0294_02 +scene0295_00 +scene0295_01 +scene0296_00 +scene0296_01 +scene0297_00 +scene0297_01 +scene0297_02 +scene0298_00 +scene0299_00 +scene0299_01 +scene0301_00 +scene0301_01 +scene0301_02 +scene0302_00 +scene0302_01 +scene0303_00 +scene0303_01 +scene0303_02 +scene0305_00 +scene0305_01 +scene0306_00 +scene0306_01 +scene0308_00 +scene0309_00 +scene0309_01 +scene0310_00 +scene0310_01 +scene0310_02 +scene0311_00 +scene0312_00 +scene0312_01 +scene0312_02 +scene0313_00 +scene0313_01 +scene0313_02 +scene0315_00 +scene0317_00 +scene0317_01 +scene0318_00 +scene0319_00 +scene0320_00 +scene0320_01 +scene0320_02 +scene0320_03 +scene0321_00 +scene0322_00 +scene0323_00 +scene0323_01 +scene0324_00 +scene0324_01 +scene0325_00 +scene0325_01 +scene0326_00 +scene0327_00 +scene0330_00 +scene0331_00 +scene0331_01 +scene0332_00 +scene0332_01 +scene0332_02 +scene0333_00 +scene0335_00 +scene0335_01 +scene0335_02 +scene0336_00 +scene0336_01 +scene0337_00 +scene0337_01 +scene0337_02 +scene0339_00 +scene0340_00 +scene0340_01 +scene0340_02 +scene0341_00 +scene0341_01 +scene0344_00 +scene0344_01 +scene0345_00 +scene0345_01 +scene0346_00 +scene0346_01 +scene0347_00 +scene0347_01 +scene0347_02 +scene0348_00 +scene0348_01 +scene0348_02 +scene0349_00 +scene0349_01 +scene0350_00 +scene0350_01 +scene0350_02 +scene0352_00 +scene0352_01 +scene0352_02 +scene0358_00 +scene0358_01 +scene0358_02 +scene0359_00 +scene0359_01 +scene0360_00 +scene0361_00 +scene0361_01 +scene0361_02 +scene0362_00 +scene0362_01 +scene0362_02 +scene0362_03 +scene0363_00 +scene0364_00 +scene0364_01 +scene0365_00 +scene0365_01 +scene0365_02 +scene0366_00 +scene0367_00 +scene0367_01 +scene0368_00 +scene0368_01 +scene0369_00 +scene0369_01 +scene0369_02 +scene0370_00 +scene0370_01 +scene0370_02 +scene0371_00 +scene0371_01 +scene0372_00 +scene0373_00 +scene0373_01 +scene0374_00 +scene0375_00 +scene0375_01 
+scene0375_02 +scene0376_00 +scene0376_01 +scene0376_02 +scene0379_00 +scene0380_00 +scene0380_01 +scene0380_02 +scene0381_00 +scene0381_01 +scene0381_02 +scene0383_00 +scene0383_01 +scene0383_02 +scene0384_00 +scene0385_00 +scene0385_01 +scene0385_02 +scene0386_00 +scene0387_00 +scene0387_01 +scene0387_02 +scene0388_00 +scene0388_01 +scene0390_00 +scene0391_00 +scene0392_00 +scene0392_01 +scene0392_02 +scene0393_00 +scene0393_01 +scene0393_02 +scene0394_00 +scene0394_01 +scene0395_00 +scene0395_01 +scene0395_02 +scene0396_00 +scene0396_01 +scene0396_02 +scene0397_00 +scene0397_01 +scene0398_00 +scene0398_01 +scene0399_00 +scene0399_01 +scene0400_00 +scene0400_01 +scene0401_00 +scene0402_00 +scene0403_00 +scene0403_01 +scene0404_00 +scene0404_01 +scene0404_02 +scene0405_00 +scene0407_00 +scene0407_01 +scene0408_00 +scene0408_01 +scene0409_00 +scene0409_01 +scene0410_00 +scene0410_01 +scene0411_00 +scene0411_01 +scene0411_02 +scene0413_00 +scene0415_00 +scene0415_01 +scene0415_02 +scene0416_00 +scene0416_01 +scene0416_02 +scene0416_03 +scene0416_04 +scene0417_00 +scene0418_00 +scene0418_01 +scene0418_02 +scene0419_00 +scene0419_01 +scene0419_02 +scene0420_00 +scene0420_01 +scene0420_02 +scene0421_00 +scene0421_01 +scene0421_02 +scene0422_00 +scene0424_00 +scene0424_01 +scene0424_02 +scene0425_00 +scene0425_01 +scene0428_00 +scene0428_01 +scene0429_00 +scene0431_00 +scene0433_00 +scene0434_00 +scene0434_01 +scene0434_02 +scene0436_00 +scene0437_00 +scene0437_01 +scene0438_00 +scene0439_00 +scene0439_01 +scene0440_00 +scene0440_01 +scene0440_02 +scene0442_00 +scene0443_00 +scene0444_00 +scene0444_01 +scene0445_00 +scene0445_01 +scene0446_00 +scene0446_01 +scene0447_00 +scene0447_01 +scene0447_02 +scene0448_00 +scene0448_01 +scene0448_02 +scene0449_00 +scene0449_01 +scene0449_02 +scene0450_00 +scene0451_00 +scene0451_01 +scene0451_02 +scene0451_03 +scene0451_04 +scene0451_05 +scene0452_00 +scene0452_01 +scene0452_02 +scene0453_00 +scene0453_01 +scene0454_00 +scene0455_00 +scene0456_00 +scene0456_01 +scene0457_00 +scene0457_01 +scene0457_02 +scene0459_00 +scene0459_01 +scene0460_00 +scene0463_00 +scene0463_01 +scene0464_00 +scene0465_00 +scene0465_01 +scene0466_00 +scene0466_01 +scene0467_00 +scene0468_00 +scene0468_01 +scene0468_02 +scene0469_00 +scene0469_01 +scene0469_02 +scene0470_00 +scene0470_01 +scene0471_00 +scene0471_01 +scene0471_02 +scene0472_00 +scene0472_01 +scene0472_02 +scene0473_00 +scene0473_01 +scene0475_00 +scene0475_01 +scene0475_02 +scene0476_00 +scene0476_01 +scene0476_02 +scene0477_00 +scene0477_01 +scene0478_00 +scene0478_01 +scene0479_00 +scene0479_01 +scene0479_02 +scene0480_00 +scene0480_01 +scene0481_00 +scene0481_01 +scene0482_00 +scene0482_01 +scene0483_00 +scene0484_00 +scene0484_01 +scene0485_00 +scene0486_00 +scene0487_00 +scene0487_01 +scene0489_00 +scene0489_01 +scene0489_02 +scene0491_00 +scene0492_00 +scene0492_01 +scene0493_00 +scene0493_01 +scene0495_00 +scene0497_00 +scene0498_00 +scene0498_01 +scene0498_02 +scene0499_00 +scene0501_00 +scene0501_01 +scene0501_02 +scene0502_00 +scene0502_01 +scene0502_02 +scene0503_00 +scene0504_00 +scene0505_00 +scene0505_01 +scene0505_02 +scene0505_03 +scene0505_04 +scene0506_00 +scene0507_00 +scene0508_00 +scene0508_01 +scene0508_02 +scene0509_00 +scene0509_01 +scene0509_02 +scene0510_00 +scene0510_01 +scene0510_02 +scene0511_00 +scene0511_01 +scene0512_00 +scene0513_00 +scene0514_00 +scene0514_01 +scene0515_00 +scene0515_01 +scene0515_02 +scene0516_00 +scene0516_01 +scene0517_00 +scene0517_01 +scene0517_02 
+scene0519_00 +scene0520_00 +scene0520_01 +scene0521_00 +scene0522_00 +scene0523_00 +scene0523_01 +scene0523_02 +scene0524_00 +scene0524_01 +scene0525_00 +scene0525_01 +scene0525_02 +scene0526_00 +scene0526_01 +scene0528_00 +scene0528_01 +scene0529_00 +scene0529_01 +scene0529_02 +scene0530_00 +scene0531_00 +scene0532_00 +scene0532_01 +scene0533_00 +scene0533_01 +scene0534_00 +scene0534_01 +scene0536_00 +scene0536_01 +scene0536_02 +scene0537_00 +scene0538_00 +scene0539_00 +scene0539_01 +scene0539_02 +scene0540_00 +scene0540_01 +scene0540_02 +scene0541_00 +scene0541_01 +scene0541_02 +scene0542_00 +scene0543_00 +scene0543_01 +scene0543_02 +scene0544_00 +scene0545_00 +scene0545_01 +scene0545_02 +scene0546_00 +scene0547_00 +scene0547_01 +scene0547_02 +scene0548_00 +scene0548_01 +scene0548_02 +scene0551_00 +scene0554_00 +scene0554_01 +scene0555_00 +scene0556_00 +scene0556_01 +scene0557_00 +scene0557_01 +scene0557_02 +scene0560_00 +scene0561_00 +scene0561_01 +scene0562_00 +scene0563_00 +scene0564_00 +scene0566_00 +scene0567_00 +scene0567_01 +scene0569_00 +scene0569_01 +scene0570_00 +scene0570_01 +scene0570_02 +scene0571_00 +scene0571_01 +scene0572_00 +scene0572_01 +scene0572_02 +scene0573_00 +scene0573_01 +scene0576_00 +scene0576_01 +scene0576_02 +scene0577_00 +scene0579_00 +scene0579_01 +scene0579_02 +scene0581_00 +scene0581_01 +scene0581_02 +scene0582_00 +scene0582_01 +scene0582_02 +scene0584_00 +scene0584_01 +scene0584_02 +scene0585_00 +scene0585_01 +scene0586_00 +scene0586_01 +scene0586_02 +scene0587_00 +scene0587_01 +scene0587_02 +scene0587_03 +scene0588_00 +scene0588_01 +scene0588_02 +scene0588_03 +scene0589_00 +scene0589_01 +scene0589_02 +scene0590_00 +scene0590_01 +scene0592_00 +scene0592_01 +scene0594_00 +scene0596_00 +scene0596_01 +scene0596_02 +scene0597_00 +scene0597_01 +scene0597_02 +scene0600_00 +scene0600_01 +scene0600_02 +scene0601_00 +scene0601_01 +scene0602_00 +scene0603_00 +scene0603_01 +scene0604_00 +scene0604_01 +scene0604_02 +scene0605_00 +scene0605_01 +scene0610_00 +scene0610_01 +scene0610_02 +scene0611_00 +scene0611_01 +scene0612_00 +scene0612_01 +scene0613_00 +scene0613_01 +scene0613_02 +scene0614_00 +scene0614_01 +scene0614_02 +scene0615_00 +scene0615_01 +scene0617_00 +scene0619_00 +scene0620_00 +scene0620_01 +scene0622_00 +scene0622_01 +scene0623_00 +scene0623_01 +scene0624_00 +scene0625_00 +scene0625_01 +scene0626_00 +scene0626_01 +scene0626_02 +scene0627_00 +scene0627_01 +scene0628_00 +scene0628_01 +scene0628_02 +scene0630_00 +scene0630_01 +scene0630_02 +scene0630_03 +scene0630_04 +scene0630_05 +scene0630_06 +scene0631_00 +scene0631_01 +scene0631_02 +scene0632_00 +scene0634_00 +scene0635_00 +scene0635_01 +scene0636_00 +scene0637_00 +scene0638_00 +scene0639_00 +scene0640_00 +scene0640_01 +scene0640_02 +scene0641_00 +scene0642_00 +scene0642_01 +scene0642_02 +scene0642_03 +scene0646_00 +scene0646_01 +scene0646_02 +scene0649_00 +scene0649_01 +scene0650_00 +scene0654_00 +scene0654_01 +scene0656_00 +scene0656_01 +scene0656_02 +scene0656_03 +scene0657_00 +scene0659_00 +scene0659_01 +scene0661_00 +scene0662_00 +scene0662_01 +scene0662_02 +scene0666_00 +scene0666_01 +scene0666_02 +scene0667_00 +scene0667_01 +scene0667_02 +scene0668_00 +scene0669_00 +scene0669_01 +scene0672_00 +scene0672_01 +scene0673_00 +scene0673_01 +scene0673_02 +scene0673_03 +scene0673_04 +scene0673_05 +scene0674_00 +scene0674_01 +scene0675_00 +scene0675_01 +scene0676_00 +scene0676_01 +scene0677_00 +scene0677_01 +scene0677_02 +scene0679_00 +scene0679_01 +scene0680_00 +scene0680_01 +scene0681_00 
+scene0682_00 +scene0683_00 +scene0687_00 +scene0688_00 +scene0691_00 +scene0691_01 +scene0692_00 +scene0692_01 +scene0692_02 +scene0692_03 +scene0692_04 +scene0694_00 +scene0694_01 +scene0698_00 +scene0698_01 +scene0703_00 +scene0703_01 +scene0705_00 +scene0705_01 +scene0705_02 +scene0706_00 diff --git a/models/LL3DA/data/scannet/meta_data/scannetv2_val.txt b/models/LL3DA/data/scannet/meta_data/scannetv2_val.txt new file mode 100644 index 0000000..36e02b3 --- /dev/null +++ b/models/LL3DA/data/scannet/meta_data/scannetv2_val.txt @@ -0,0 +1,312 @@ +scene0011_00 +scene0011_01 +scene0015_00 +scene0019_00 +scene0019_01 +scene0025_00 +scene0025_01 +scene0025_02 +scene0030_00 +scene0030_01 +scene0030_02 +scene0046_00 +scene0046_01 +scene0046_02 +scene0050_00 +scene0050_01 +scene0050_02 +scene0063_00 +scene0064_00 +scene0064_01 +scene0077_00 +scene0077_01 +scene0081_00 +scene0081_01 +scene0081_02 +scene0084_00 +scene0084_01 +scene0084_02 +scene0086_00 +scene0086_01 +scene0086_02 +scene0088_00 +scene0088_01 +scene0088_02 +scene0088_03 +scene0095_00 +scene0095_01 +scene0100_00 +scene0100_01 +scene0100_02 +scene0131_00 +scene0131_01 +scene0131_02 +scene0139_00 +scene0144_00 +scene0144_01 +scene0146_00 +scene0146_01 +scene0146_02 +scene0149_00 +scene0153_00 +scene0153_01 +scene0164_00 +scene0164_01 +scene0164_02 +scene0164_03 +scene0169_00 +scene0169_01 +scene0187_00 +scene0187_01 +scene0193_00 +scene0193_01 +scene0196_00 +scene0203_00 +scene0203_01 +scene0203_02 +scene0207_00 +scene0207_01 +scene0207_02 +scene0208_00 +scene0217_00 +scene0221_00 +scene0221_01 +scene0222_00 +scene0222_01 +scene0231_00 +scene0231_01 +scene0231_02 +scene0246_00 +scene0249_00 +scene0251_00 +scene0256_00 +scene0256_01 +scene0256_02 +scene0257_00 +scene0277_00 +scene0277_01 +scene0277_02 +scene0278_00 +scene0278_01 +scene0300_00 +scene0300_01 +scene0304_00 +scene0307_00 +scene0307_01 +scene0307_02 +scene0314_00 +scene0316_00 +scene0328_00 +scene0329_00 +scene0329_01 +scene0329_02 +scene0334_00 +scene0334_01 +scene0334_02 +scene0338_00 +scene0338_01 +scene0338_02 +scene0342_00 +scene0343_00 +scene0351_00 +scene0351_01 +scene0353_00 +scene0353_01 +scene0353_02 +scene0354_00 +scene0355_00 +scene0355_01 +scene0356_00 +scene0356_01 +scene0356_02 +scene0357_00 +scene0357_01 +scene0377_00 +scene0377_01 +scene0377_02 +scene0378_00 +scene0378_01 +scene0378_02 +scene0382_00 +scene0382_01 +scene0389_00 +scene0406_00 +scene0406_01 +scene0406_02 +scene0412_00 +scene0412_01 +scene0414_00 +scene0423_00 +scene0423_01 +scene0423_02 +scene0426_00 +scene0426_01 +scene0426_02 +scene0426_03 +scene0427_00 +scene0430_00 +scene0430_01 +scene0432_00 +scene0432_01 +scene0435_00 +scene0435_01 +scene0435_02 +scene0435_03 +scene0441_00 +scene0458_00 +scene0458_01 +scene0461_00 +scene0462_00 +scene0474_00 +scene0474_01 +scene0474_02 +scene0474_03 +scene0474_04 +scene0474_05 +scene0488_00 +scene0488_01 +scene0490_00 +scene0494_00 +scene0496_00 +scene0500_00 +scene0500_01 +scene0518_00 +scene0527_00 +scene0535_00 +scene0549_00 +scene0549_01 +scene0550_00 +scene0552_00 +scene0552_01 +scene0553_00 +scene0553_01 +scene0553_02 +scene0558_00 +scene0558_01 +scene0558_02 +scene0559_00 +scene0559_01 +scene0559_02 +scene0565_00 +scene0568_00 +scene0568_01 +scene0568_02 +scene0574_00 +scene0574_01 +scene0574_02 +scene0575_00 +scene0575_01 +scene0575_02 +scene0578_00 +scene0578_01 +scene0578_02 +scene0580_00 +scene0580_01 +scene0583_00 +scene0583_01 +scene0583_02 +scene0591_00 +scene0591_01 +scene0591_02 +scene0593_00 +scene0593_01 +scene0595_00 +scene0598_00 
+scene0598_01 +scene0598_02 +scene0599_00 +scene0599_01 +scene0599_02 +scene0606_00 +scene0606_01 +scene0606_02 +scene0607_00 +scene0607_01 +scene0608_00 +scene0608_01 +scene0608_02 +scene0609_00 +scene0609_01 +scene0609_02 +scene0609_03 +scene0616_00 +scene0616_01 +scene0618_00 +scene0621_00 +scene0629_00 +scene0629_01 +scene0629_02 +scene0633_00 +scene0633_01 +scene0643_00 +scene0644_00 +scene0645_00 +scene0645_01 +scene0645_02 +scene0647_00 +scene0647_01 +scene0648_00 +scene0648_01 +scene0651_00 +scene0651_01 +scene0651_02 +scene0652_00 +scene0653_00 +scene0653_01 +scene0655_00 +scene0655_01 +scene0655_02 +scene0658_00 +scene0660_00 +scene0663_00 +scene0663_01 +scene0663_02 +scene0664_00 +scene0664_01 +scene0664_02 +scene0665_00 +scene0665_01 +scene0670_00 +scene0670_01 +scene0671_00 +scene0671_01 +scene0678_00 +scene0678_01 +scene0678_02 +scene0684_00 +scene0684_01 +scene0685_00 +scene0685_01 +scene0685_02 +scene0686_00 +scene0686_01 +scene0686_02 +scene0689_00 +scene0690_00 +scene0690_01 +scene0693_00 +scene0693_01 +scene0693_02 +scene0695_00 +scene0695_01 +scene0695_02 +scene0695_03 +scene0696_00 +scene0696_01 +scene0696_02 +scene0697_00 +scene0697_01 +scene0697_02 +scene0697_03 +scene0699_00 +scene0700_00 +scene0700_01 +scene0700_02 +scene0701_00 +scene0701_01 +scene0701_02 +scene0702_00 +scene0702_01 +scene0702_02 +scene0704_00 +scene0704_01 diff --git a/models/LL3DA/data/scannet/model_util_scannet.py b/models/LL3DA/data/scannet/model_util_scannet.py new file mode 100644 index 0000000..d887834 --- /dev/null +++ b/models/LL3DA/data/scannet/model_util_scannet.py @@ -0,0 +1,210 @@ +"""Modified from: https://github.com/facebookresearch/votenet/blob/master/scann +et/model_util_scannet.py.""" + +import os +import sys + +import numpy as np + +sys.path.append(os.path.join(os.getcwd(), os.pardir, + 'lib')) # HACK add the lib folder +from lib.config import CONF +from utils.box_util import get_3d_box + + +def in_hull(p, hull): + from scipy.spatial import Delaunay + if not isinstance(hull, Delaunay): + hull = Delaunay(hull) + return hull.find_simplex(p) >= 0 + + +def extract_pc_in_box3d(pc, box3d): + ''' pc: (N,3), box3d: (8,3) ''' + box3d_roi_inds = in_hull(pc[:, 0:3], box3d) + return pc[box3d_roi_inds, :], box3d_roi_inds + + +def rotate_aligned_boxes(input_boxes, rot_mat): + centers, lengths = input_boxes[:, 0:3], input_boxes[:, 3:6] + new_centers = np.dot(centers, np.transpose(rot_mat)) + + dx, dy = lengths[:, 0] / 2.0, lengths[:, 1] / 2.0 + new_x = np.zeros((dx.shape[0], 4)) + new_y = np.zeros((dx.shape[0], 4)) + + for i, crnr in enumerate([(-1, -1), (1, -1), (1, 1), (-1, 1)]): + crnrs = np.zeros((dx.shape[0], 3)) + crnrs[:, 0] = crnr[0] * dx + crnrs[:, 1] = crnr[1] * dy + crnrs = np.dot(crnrs, np.transpose(rot_mat)) + new_x[:, i] = crnrs[:, 0] + new_y[:, i] = crnrs[:, 1] + + new_dx = 2.0 * np.max(new_x, 1) + new_dy = 2.0 * np.max(new_y, 1) + new_lengths = np.stack((new_dx, new_dy, lengths[:, 2]), axis=1) + + return np.concatenate([new_centers, new_lengths], axis=1) + + +def rotate_aligned_boxes_along_axis(input_boxes, rot_mat, axis): + centers, lengths = input_boxes[:, 0:3], input_boxes[:, 3:6] + new_centers = np.dot(centers, np.transpose(rot_mat)) + + if axis == 'x': + d1, d2 = lengths[:, 1] / 2.0, lengths[:, 2] / 2.0 + elif axis == 'y': + d1, d2 = lengths[:, 0] / 2.0, lengths[:, 2] / 2.0 + else: + d1, d2 = lengths[:, 0] / 2.0, lengths[:, 1] / 2.0 + + new_1 = np.zeros((d1.shape[0], 4)) + new_2 = np.zeros((d1.shape[0], 4)) + + for i, crnr in enumerate([(-1, -1), (1, -1), (1, 1), (-1, 
1)]): + crnrs = np.zeros((d1.shape[0], 3)) + crnrs[:, 0] = crnr[0] * d1 + crnrs[:, 1] = crnr[1] * d2 + crnrs = np.dot(crnrs, np.transpose(rot_mat)) + new_1[:, i] = crnrs[:, 0] + new_2[:, i] = crnrs[:, 1] + + new_d1 = 2.0 * np.max(new_1, 1) + new_d2 = 2.0 * np.max(new_2, 1) + + if axis == 'x': + new_lengths = np.stack((lengths[:, 0], new_d1, new_d2), axis=1) + elif axis == 'y': + new_lengths = np.stack((new_d1, lengths[:, 1], new_d2), axis=1) + else: + new_lengths = np.stack((new_d1, new_d2, lengths[:, 2]), axis=1) + + return np.concatenate([new_centers, new_lengths], axis=1) + + +class ScannetDatasetConfig(object): + + def __init__(self): + self.type2class = { + 'cabinet': 0, + 'bed': 1, + 'chair': 2, + 'sofa': 3, + 'table': 4, + 'door': 5, + 'window': 6, + 'bookshelf': 7, + 'picture': 8, + 'counter': 9, + 'desk': 10, + 'curtain': 11, + 'refrigerator': 12, + 'shower curtain': 13, + 'toilet': 14, + 'sink': 15, + 'bathtub': 16, + 'others': 17 + } + self.class2type = {self.type2class[t]: t for t in self.type2class} + + self.nyu40ids = np.array([ + 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, + 21, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, + 39, 40 + ]) # exclude wall (1), floor (2), ceiling (22) + self.nyu40id2class = self._get_nyu40id2class() + self.mean_size_arr = np.load( + os.path.join(CONF.PATH.SCANNET, + 'meta_data/scannet_reference_means.npz'))['arr_0'] + + self.num_class = len(self.type2class.keys()) + self.num_heading_bin = 1 + self.num_size_cluster = len(self.type2class.keys()) + + self.type_mean_size = {} + for i in range(self.num_size_cluster): + self.type_mean_size[self.class2type[i]] = self.mean_size_arr[i, :] + + def _get_nyu40id2class(self): + lines = [ + line.rstrip() for line in open( + os.path.join(CONF.PATH.SCANNET, + 'meta_data/scannetv2-labels.combined.tsv')) + ] + lines = lines[1:] + nyu40ids2class = {} + for i in range(len(lines)): + label_classes_set = set(self.type2class.keys()) + elements = lines[i].split('\t') + nyu40_id = int(elements[4]) + nyu40_name = elements[7] + if nyu40_id in self.nyu40ids: + if nyu40_name not in label_classes_set: + nyu40ids2class[nyu40_id] = self.type2class['others'] + else: + nyu40ids2class[nyu40_id] = self.type2class[nyu40_name] + + return nyu40ids2class + + def angle2class(self, angle): + """Convert continuous angle to discrete class. + + [optional] also returns a small regression number from + class center angle to current angle. + + angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N) + return is class of int32 of 0,1,...,N-1 and a number such that + class*(2pi/N) + number = angle + + NOT USED. + """ + assert (False) + + def class2angle(self, pred_cls, residual, to_label_format=True): + """Inverse function to angle2class. + + As ScanNet only has axis-aligned boxes, angles are always 0. + """ + return 0 + + def class2angle_batch(self, pred_cls, residual, to_label_format=True): + """Inverse function to angle2class. + + As ScanNet only has axis-aligned boxes, angles are always 0.
+ """ + return np.zeros(pred_cls.shape[0]) + + def size2class(self, size, type_name): + """Convert 3D box size (l,w,h) to size class and size residual.""" + size_class = self.type2class[type_name] + size_residual = size - self.type_mean_size[type_name] + return size_class, size_residual + + def class2size(self, pred_cls, residual): + """Inverse function to size2class.""" + return self.mean_size_arr[pred_cls] + residual + + def class2size_batch(self, pred_cls, residual): + """Inverse function to size2class.""" + return self.mean_size_arr[pred_cls] + residual + + def param2obb(self, center, heading_class, heading_residual, size_class, + size_residual): + heading_angle = self.class2angle(heading_class, heading_residual) + box_size = self.class2size(int(size_class), size_residual) + obb = np.zeros((7, )) + obb[0:3] = center + obb[3:6] = box_size + obb[6] = heading_angle * -1 + return obb + + def param2obb_batch(self, center, heading_class, heading_residual, + size_class, size_residual): + heading_angle = self.class2angle_batch(heading_class, heading_residual) + box_size = self.class2size_batch(size_class, size_residual) + obb = np.zeros((heading_class.shape[0], 7)) + obb[:, 0:3] = center + obb[:, 3:6] = box_size + obb[:, 6] = heading_angle * -1 + return obb diff --git a/models/LL3DA/data/scannet/scannet_utils.py b/models/LL3DA/data/scannet/scannet_utils.py new file mode 100644 index 0000000..996f675 --- /dev/null +++ b/models/LL3DA/data/scannet/scannet_utils.py @@ -0,0 +1,125 @@ +"""Modified from: https://github.com/facebookresearch/votenet/blob/master/scann +et/scannet_utils.py.""" + +import csv +import json +import os +import sys + +import numpy as np + +try: + from plyfile import PlyData, PlyElement +except: + print("Please install the module 'plyfile' for PLY i/o, e.g.") + print('pip install plyfile') + sys.exit(-1) + + +def normalize_v3(arr): + """Normalize a numpy array of 3 component vectors shape=(n,3)""" + lens = np.sqrt(arr[:, 0]**2 + arr[:, 1]**2 + arr[:, 2]**2) + arr[:, 0] /= (lens + 1e-8) + arr[:, 1] /= (lens + 1e-8) + arr[:, 2] /= (lens + 1e-8) + return arr + + +def compute_normal(vertices, faces): + #Create a zeroed array with the same type and shape as our vertices i.e., per vertex normal + normals = np.zeros(vertices.shape, dtype=vertices.dtype) + #Create an indexed view into the vertex array using the array of three indices for triangles + tris = vertices[faces] + #Calculate the normal for all the triangles, by taking the cross product of the vectors v1-v0, and v2-v0 in each triangle + n = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0]) + # n is now an array of normals per triangle. The length of each normal is dependent the vertices, + # we need to normalize these, so that our next step weights each normal equally. + normalize_v3(n) + # now we have a normalized array of normals, one per triangle, i.e., per triangle normals. + # But instead of one per triangle (i.e., flat shading), we add to each vertex in that triangle, + # the triangles' normal. Multiple triangles would then contribute to every vertex, so we need to normalize again afterwards. 
+ # The cool part, we can actually add the normals through an indexed view of our (zeroed) per vertex normal array + normals[faces[:, 0]] += n + normals[faces[:, 1]] += n + normals[faces[:, 2]] += n + normalize_v3(normals) + + return normals + + +def represents_int(s): + """Check if string s represents an int.""" + try: + int(s) + return True + except ValueError: + return False + + +def read_label_mapping(filename, + label_from='raw_category', + label_to='nyu40id'): + assert os.path.isfile(filename) + mapping = dict() + with open(filename) as csvfile: + reader = csv.DictReader(csvfile, delimiter='\t') + for row in reader: + mapping[row[label_from]] = int(row[label_to]) + if represents_int(list(mapping.keys())[0]): + mapping = {int(k): v for k, v in mapping.items()} + return mapping + + +def read_mesh_vertices(filename): + """Read XYZ for each vertex.""" + assert os.path.isfile(filename) + with open(filename, 'rb') as f: + plydata = PlyData.read(f) + num_verts = plydata['vertex'].count + vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32) + vertices[:, 0] = plydata['vertex'].data['x'] + vertices[:, 1] = plydata['vertex'].data['y'] + vertices[:, 2] = plydata['vertex'].data['z'] + return vertices + + +def read_mesh_vertices_rgb(filename): + """Read XYZ RGB for each vertex. + + Note: RGB values are in 0-255 + """ + assert os.path.isfile(filename) + with open(filename, 'rb') as f: + plydata = PlyData.read(f) + num_verts = plydata['vertex'].count + vertices = np.zeros(shape=[num_verts, 6], dtype=np.float32) + vertices[:, 0] = plydata['vertex'].data['x'] + vertices[:, 1] = plydata['vertex'].data['y'] + vertices[:, 2] = plydata['vertex'].data['z'] + vertices[:, 3] = plydata['vertex'].data['red'] + vertices[:, 4] = plydata['vertex'].data['green'] + vertices[:, 5] = plydata['vertex'].data['blue'] + return vertices + + +def read_mesh_vertices_rgb_normal(filename): + """Read XYZ RGB normals point cloud from filename PLY file.""" + assert (os.path.isfile(filename)) + with open(filename, 'rb') as f: + plydata = PlyData.read(f) + num_verts = plydata['vertex'].count + vertices = np.zeros(shape=[num_verts, 9], dtype=np.float32) + vertices[:, 0] = plydata['vertex'].data['x'] + vertices[:, 1] = plydata['vertex'].data['y'] + vertices[:, 2] = plydata['vertex'].data['z'] + vertices[:, 3] = plydata['vertex'].data['red'] + vertices[:, 4] = plydata['vertex'].data['green'] + vertices[:, 5] = plydata['vertex'].data['blue'] + + # compute normals + xyz = np.array([[x, y, z] + for x, y, z, _, _, _, _ in plydata['vertex'].data]) + face = np.array([f[0] for f in plydata['face'].data]) + nxnynz = compute_normal(xyz, face) + vertices[:, 6:] = nxnynz + return vertices diff --git a/models/LL3DA/data/scannet/visualize.py b/models/LL3DA/data/scannet/visualize.py new file mode 100644 index 0000000..d36981c --- /dev/null +++ b/models/LL3DA/data/scannet/visualize.py @@ -0,0 +1,29 @@ +import argparse +import os + +import numpy as np + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scene_id', + type=str, + help='scene id of scene to be visualized', + default='scene0000_00') + args = parser.parse_args() + + verts = np.load('scannet_data/{}_vert.npy'.format(args.scene_id)) + aligned_verts = np.load('scannet_data/{}_aligned_vert.npy'.format( + args.scene_id)) + + with open('scannet_data/{}_verts.obj'.format(args.scene_id), 'w') as f: + for i in range(verts.shape[0]): + f.write('v {} {} {} {} {} {}\n'.format(verts[i, 0], verts[i, 1], + verts[i, 2], verts[i, 3], + verts[i, 4], verts[i, 5])) +
+ with open('scannet_data/{}_aligned_verts.obj'.format(args.scene_id), + 'w') as f: + for i in range(aligned_verts.shape[0]): + f.write('v {} {} {} {} {} {}\n'.format( + aligned_verts[i, 0], aligned_verts[i, 1], aligned_verts[i, 2], + aligned_verts[i, 3], aligned_verts[i, 4], aligned_verts[i, 5])) diff --git a/models/LL3DA/data/scannet_category_map.json b/models/LL3DA/data/scannet_category_map.json new file mode 100644 index 0000000..ed9177a --- /dev/null +++ b/models/LL3DA/data/scannet_category_map.json @@ -0,0 +1,609 @@ +{ + "wall": "others", + "chair": "chair", + "books": "others", + "floor": "others", + "door": "door", + "object": "others", + "window": "window", + "table": "table", + "trash can": "others", + "pillow": "others", + "picture": "picture", + "ceiling": "others", + "box": "others", + "doorframe": "door", + "monitor": "others", + "cabinet": "cabinet", + "desk": "desk", + "shelf": "others", + "office chair": "chair", + "towel": "others", + "couch": "sofa", + "sink": "sink", + "backpack": "others", + "lamp": "others", + "bed": "bed", + "bookshelf": "bookshelf", + "mirror": "others", + "curtain": "curtain", + "plant": "others", + "whiteboard": "others", + "radiator": "others", + "book": "others", + "kitchen cabinet": "cabinet", + "toilet paper": "others", + "kitchen cabinets": "cabinet", + "armchair": "chair", + "shoes": "others", + "coffee table": "table", + "toilet": "toilet", + "bag": "others", + "clothes": "others", + "keyboard": "others", + "bottle": "others", + "recycling bin": "others", + "nightstand": "others", + "stool": "others", + "tv": "others", + "file cabinet": "cabinet", + "dresser": "others", + "computer tower": "others", + "clothing": "others", + "telephone": "others", + "cup": "others", + "refrigerator": "refrigerator", + "end table": "table", + "jacket": "others", + "shower curtain": "shower curtain", + "bathtub": "bathtub", + "microwave": "others", + "kitchen counter": "counter", + "sofa chair": "chair", + "paper towel dispenser": "others", + "bathroom vanity": "cabinet", + "suitcase": "others", + "laptop": "others", + "ottoman": "others", + "shower walls": "others", + "printer": "others", + "counter": "counter", + "board": "others", + "soap dispenser": "others", + "stove": "others", + "light": "others", + "closet wall": "others", + "mini fridge": "refrigerator", + "cabinets": "cabinet", + "doors": "door", + "fan": "others", + "tissue box": "others", + "blanket": "others", + "bathroom stall": "others", + "copier": "others", + "bench": "others", + "bar": "others", + "soap dish": "others", + "laundry hamper": "others", + "storage bin": "others", + "bathroom stall door": "door", + "light switch": "others", + "coffee maker": "others", + "tv stand": "others", + "decoration": "others", + "ceiling light": "others", + "range hood": "others", + "blackboard": "others", + "clock": "others", + "wardrobe closet": "others", + "rail": "others", + "bulletin board": "others", + "mat": "others", + "trash bin": "others", + "ledge": "others", + "seat": "others", + "mouse": "others", + "basket": "others", + "shower": "others", + "dumbbell": "others", + "paper": "others", + "person": "others", + "windowsill": "others", + "closet": "others", + "bucket": "others", + "sign": "others", + "speaker": "others", + "dishwasher": "others", + "container": "others", + "stair rail": "others", + "shower curtain rod": "others", + "tube": "others", + "bathroom cabinet": "cabinet", + "papers": "others", + "storage container": "others", + "paper bag": "others", + "paper towel roll": "others", + 
"ball": "others", + "closet doors": "door", + "laundry basket": "others", + "cart": "others", + "closet door": "door", + "dish rack": "others", + "stairs": "others", + "blinds": "others", + "stack of chairs": "chair", + "purse": "others", + "bicycle": "others", + "tray": "others", + "plunger": "others", + "paper cutter": "others", + "toilet paper dispenser": "others", + "boxes": "others", + "bin": "others", + "toilet seat cover dispenser": "others", + "guitar": "others", + "mailboxes": "others", + "handicap bar": "others", + "fire extinguisher": "others", + "ladder": "others", + "column": "others", + "pipe": "others", + "vacuum cleaner": "others", + "plate": "others", + "piano": "others", + "water cooler": "others", + "cd case": "others", + "bowl": "others", + "closet rod": "others", + "bathroom counter": "counter", + "oven": "others", + "stand": "others", + "scale": "others", + "washing machine": "others", + "broom": "others", + "hat": "others", + "shower wall": "others", + "guitar case": "others", + "rack": "others", + "water pitcher": "others", + "laundry detergent": "others", + "hair dryer": "others", + "pillar": "others", + "divider": "others", + "power outlet": "others", + "dining table": "table", + "shower floor": "others", + "washing machines": "others", + "shower door": "door", + "coffee kettle": "others", + "wardrobe cabinet": "others", + "structure": "others", + "bookshelves": "bookshelf", + "clothes dryer": "others", + "toaster": "others", + "shoe": "others", + "ironing board": "others", + "alarm clock": "others", + "shower head": "others", + "lamp base": "others", + "water bottle": "others", + "keyboard piano": "others", + "projector screen": "others", + "case of water bottles": "others", + "toaster oven": "others", + "music stand": "others", + "staircase": "others", + "coat rack": "others", + "storage organizer": "others", + "machine": "others", + "folded chair": "chair", + "fire alarm": "others", + "fireplace": "others", + "vent": "others", + "furniture": "others", + "power strip": "others", + "calendar": "others", + "poster": "picture", + "toilet paper holder": "others", + "potted plant": "others", + "stuffed animal": "others", + "luggage": "others", + "curtains": "curtain", + "headphones": "others", + "crate": "others", + "candle": "others", + "projector": "others", + "clothes dryers": "others", + "mattress": "bed", + "dustpan": "others", + "drawer": "others", + "rod": "others", + "globe": "others", + "footrest": "others", + "piano bench": "others", + "breakfast bar": "others", + "step stool": "others", + "hand rail": "others", + "vending machine": "others", + "ceiling fan": "others", + "swiffer": "others", + "foosball table": "others", + "jar": "others", + "footstool": "others", + "folded table": "table", + "round table": "table", + "hamper": "others", + "poster tube": "others", + "case": "others", + "carpet": "others", + "thermostat": "others", + "coat": "others", + "water fountain": "others", + "smoke detector": "others", + "pillows": "others", + "flip flops": "others", + "cloth": "others", + "banner": "others", + "clothes hanger": "others", + "whiteboard eraser": "others", + "iron": "others", + "instrument case": "others", + "toilet paper rolls": "others", + "soap": "others", + "block": "others", + "wall hanging": "others", + "kitchen island": "others", + "pipes": "others", + "toothbrush": "others", + "shirt": "others", + "cutting board": "others", + "vase": "others", + "shower control valve": "others", + "exercise machine": "others", + "compost bin": "others", + 
"shorts": "others", + "tire": "others", + "teddy bear": "others", + "bathrobe": "others", + "handrail": "others", + "faucet": "others", + "pantry wall": "others", + "thermos": "others", + "rug": "others", + "couch cushions": "others", + "tripod": "others", + "mailbox": "others", + "tupperware": "others", + "shoe rack": "others", + "towels": "others", + "beer bottles": "others", + "treadmill": "others", + "salt": "others", + "chest": "others", + "dispenser": "others", + "mirror doors": "door", + "remote": "others", + "folded ladder": "others", + "cushion": "others", + "carton": "others", + "step": "others", + "drying rack": "others", + "slippers": "others", + "pool table": "others", + "soda stream": "others", + "toilet brush": "others", + "loft bed": "bed", + "cooking pot": "others", + "heater": "others", + "messenger bag": "others", + "stapler": "others", + "closet walls": "others", + "scanner": "others", + "elliptical machine": "others", + "kettle": "others", + "metronome": "others", + "dumbell": "others", + "music book": "others", + "rice cooker": "others", + "dart board": "others", + "sewing machine": "others", + "grab bar": "others", + "flowerpot": "others", + "painting": "picture", + "railing": "others", + "stair": "others", + "toolbox": "others", + "nerf gun": "others", + "binders": "others", + "desk lamp": "others", + "quadcopter": "others", + "pitcher": "others", + "hanging": "others", + "mail": "others", + "closet ceiling": "others", + "hoverboard": "others", + "beanbag chair": "others", + "water heater": "others", + "spray bottle": "others", + "rope": "others", + "plastic container": "others", + "soap bottle": "others", + "ikea bag": "others", + "sleeping bag": "others", + "duffel bag": "others", + "frying pan": "others", + "oven mitt": "others", + "pot": "others", + "hand dryer": "others", + "dollhouse": "others", + "shampoo bottle": "others", + "hair brush": "others", + "tennis racket": "others", + "display case": "others", + "ping pong table": "others", + "boiler": "others", + "bag of coffee beans": "others", + "bananas": "others", + "carseat": "others", + "helmet": "others", + "umbrella": "others", + "coffee box": "others", + "envelope": "others", + "wet floor sign": "others", + "clothing rack": "others", + "controller": "others", + "bath walls": "others", + "podium": "others", + "storage box": "others", + "dolly": "others", + "shampoo": "others", + "paper tray": "others", + "cabinet door": "door", + "changing station": "others", + "poster printer": "others", + "screen": "others", + "soap bar": "others", + "crutches": "others", + "studio light": "others", + "stack of cups": "others", + "toilet flush button": "others", + "trunk": "others", + "grocery bag": "others", + "plastic bin": "others", + "pizza box": "others", + "cabinet doors": "cabinet", + "legs": "others", + "car": "others", + "shaving cream": "others", + "luggage stand": "others", + "shredder": "others", + "statue": "others", + "urinal": "toilet", + "hose": "others", + "bike pump": "others", + "coatrack": "others", + "bear": "others", + "wall lamp": "others", + "humidifier": "others", + "toothpaste": "others", + "mouthwash bottle": "others", + "poster cutter": "others", + "golf bag": "others", + "food container": "others", + "camera": "others", + "table lamp": "others", + "yoga mat": "others", + "card": "others", + "mug": "others", + "shower doors": "others", + "cardboard": "others", + "rack stand": "others", + "boxes of paper": "others", + "flag": "others", + "futon": "others", + "magazine": "others", + "exit 
sign": "others", + "rolled poster": "others", + "wheel": "others", + "pictures": "picture", + "blackboard eraser": "others", + "organizer": "others", + "doll": "others", + "book rack": "others", + "laundry bag": "others", + "sponge": "others", + "seating": "others", + "folded chairs": "chair", + "lotion bottle": "others", + "can": "others", + "lunch box": "others", + "food display": "others", + "storage shelf": "others", + "sliding wood door": "others", + "pants": "others", + "wood": "others", + "boards": "others", + "bottles": "others", + "washcloth": "others", + "workbench": "others", + "open kitchen cabinet": "cabinet", + "organizer shelf": "others", + "frame": "others", + "cups": "others", + "exercise ball": "others", + "easel": "others", + "garbage bag": "others", + "roomba": "others", + "garage door": "others", + "luggage rack": "others", + "bike lock": "others", + "briefcase": "others", + "hand towel": "others", + "bath products": "others", + "star": "others", + "map": "others", + "coffee bean bag": "others", + "headboard": "others", + "ipad": "others", + "display rack": "others", + "traffic cone": "others", + "toiletry": "others", + "canopy": "others", + "massage chair": "chair", + "paper organizer": "others", + "barricade": "others", + "platform": "others", + "cap": "others", + "dumbbell plates": "others", + "elevator": "others", + "cooking pan": "others", + "trash bag": "others", + "santa": "others", + "jewelry box": "others", + "boat": "others", + "sock": "others", + "kinect": "others", + "crib": "others", + "plastic storage bin": "others", + "cooler": "refrigerator", + "kitchen apron": "others", + "dishwashing soap bottle": "others", + "xbox controller": "others", + "banana holder": "others", + "ping pong paddle": "others", + "airplane": "others", + "conditioner bottle": "others", + "tea kettle": "others", + "bedframe": "others", + "wood beam": "others", + "toilet paper package": "others", + "wall mounted coat rack": "others", + "film light": "others", + "ceiling lamp": "others", + "chain": "others", + "sofa": "sofa", + "closet wardrobe": "others", + "sweater": "others", + "kitchen mixer": "others", + "wardrobe": "others", + "water softener": "others", + "banister": "others", + "trolley": "others", + "pantry shelf": "others", + "sofa bed": "bed", + "loofa": "others", + "shower faucet handle": "others", + "toy piano": "others", + "fish": "others", + "file cabinets": "cabinet", + "cat litter box": "others", + "electric panel": "others", + "suitcases": "others", + "curtain rod": "others", + "bunk bed": "others", + "chandelier": "others", + "tape": "others", + "plates": "others", + "alarm": "others", + "fire hose": "others", + "toy dinosaur": "others", + "cone": "others", + "glass doors": "door", + "hatrack": "others", + "subwoofer": "others", + "fire sprinkler": "others", + "trash cabinet": "cabinet", + "pantry walls": "others", + "photo": "others", + "barrier": "others", + "stacks of cups": "others", + "beachball": "others", + "folded boxes": "others", + "contact lens solution bottle": "others", + "covered box": "others", + "folder": "others", + "mail trays": "others", + "slipper": "others", + "magazine rack": "others", + "sticker": "others", + "lotion": "others", + "buddha": "others", + "file organizer": "others", + "paper towel rolls": "others", + "night lamp": "others", + "fuse box": "others", + "knife block": "others", + "furnace": "others", + "cd cases": "others", + "stools": "others", + "hand sanitzer dispenser": "others", + "teapot": "others", + "pen holder": "others", + 
"tray rack": "others", + "wig": "others", + "switch": "others", + "plastic containers": "others", + "night light": "others", + "notepad": "others", + "mail bin": "others", + "elevator button": "others", + "gaming wheel": "others", + "drum set": "others", + "cosmetic bag": "others", + "coffee mug": "others", + "closet shelf": "others", + "baby mobile": "others", + "diaper bin": "others", + "door wall": "others", + "stepstool": "others", + "paper shredder": "others", + "dress rack": "others", + "cover": "others", + "shopping bag": "others", + "sliding door": "door", + "exercise bike": "others", + "recliner chair": "chair", + "kitchenaid mixer": "others", + "soda can": "others", + "stovetop": "others", + "stepladder": "others", + "tap": "others", + "cable": "others", + "baby changing station": "others", + "costume": "others", + "rocking chair": "chair", + "binder": "others", + "media center": "cabinet", + "towel rack": "others", + "medal": "others", + "stack of folded chairs": "chair", + "telescope": "others", + "closet doorframe": "door", + "glass": "others", + "baseball cap": "others", + "battery disposal jar": "others", + "mop": "others", + "tank": "others", + "mail tray": "others", + "centerpiece": "others", + "stick": "others", + "closet floor": "others", + "dryer sheets": "others", + "bycicle": "others", + "flower stand": "others", + "air mattress": "bed", + "clip": "others", + "side table": "table", + "pizza boxes": "others", + "display": "others", + "postcard": "others", + "display sign": "others", + "paper towel": "others", + "boots": "others", + "tennis racket bag": "others", + "air hockey table": "table", + "socks": "others", + "food bag": "others", + "clothes hangers": "others", + "starbucks cup": "others" +} diff --git a/models/LL3DA/datasets/scannet.py b/models/LL3DA/datasets/scannet.py new file mode 100644 index 0000000..0ba9bde --- /dev/null +++ b/models/LL3DA/datasets/scannet.py @@ -0,0 +1,34 @@ +from datasets.scannet_base_dataset import (BASE, DatasetConfig, + ScanNetBaseDataset) +from eval_utils.evaluate_det import evaluate + + +class Dataset(ScanNetBaseDataset): + + def __init__( + self, + args, + dataset_config, + split_set='train', + num_points=40000, + use_color=False, + use_normal=False, + use_multiview=False, + use_height=False, + augment=False, + ): + super().__init__( + args, + dataset_config, + split_set=split_set, + num_points=num_points, + use_color=use_color, + use_normal=use_normal, + use_multiview=use_multiview, + use_height=use_height, + augment=augment, + use_random_cuboid=False, + random_cuboid_min_points=None, + ) + + self.eval_func = evaluate diff --git a/models/LL3DA/datasets/scannet_base_dataset.py b/models/LL3DA/datasets/scannet_base_dataset.py new file mode 100644 index 0000000..8261243 --- /dev/null +++ b/models/LL3DA/datasets/scannet_base_dataset.py @@ -0,0 +1,427 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +"""Modified from https://github.com/facebookresearch/votenet Dataset for object +bounding box regression. + +An axis aligned bounding box is parameterized by (cx,cy,cz) and (dx,dy,dz) +where (cx,cy,cz) is the center point of the box, dx is the x-axis length of the +box. 
+""" +import multiprocessing as mp +import os +import sys + +import h5py +import numpy as np +import torch +import utils.pc_util as pc_util +from torch.utils.data import Dataset +from utils.box_util import (flip_axis_to_camera_np, flip_axis_to_camera_tensor, + get_3d_box_batch_np, get_3d_box_batch_tensor) +from utils.pc_util import scale_points, shift_scale_points +from utils.random_cuboid import RandomCuboid + +IGNORE_LABEL = -100 +MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8]) +BASE = '.' ## Replace with path to dataset +DATASET_ROOT_DIR = os.path.join(BASE, 'data', 'scannet', 'scannet_data') +DATASET_METADATA_DIR = os.path.join(BASE, 'data', 'scannet', 'meta_data') + +# some processes are no-use, just drop it off + + +class DatasetConfig(object): + + def __init__(self): + self.num_semcls = 18 + self.num_angle_bin = 1 + self.max_num_obj = 128 + + def angle2class(self, angle): + raise ValueError('ScanNet does not have rotated bounding boxes.') + + def class2anglebatch_tensor(self, + pred_cls, + residual, + to_label_format=True): + zero_angle = torch.zeros( + (pred_cls.shape[0], pred_cls.shape[1]), + dtype=torch.float32, + device=pred_cls.device, + ) + return zero_angle + + def class2anglebatch(self, pred_cls, residual, to_label_format=True): + zero_angle = np.zeros(pred_cls.shape[0], dtype=np.float32) + return zero_angle + + def param2obb( + self, + center, + heading_class, + heading_residual, + size_class, + size_residual, + box_size=None, + ): + heading_angle = self.class2angle(heading_class, heading_residual) + if box_size is None: + box_size = self.class2size(int(size_class), size_residual) + obb = np.zeros((7, )) + obb[0:3] = center + obb[3:6] = box_size + obb[6] = heading_angle * -1 + return obb + + def box_parametrization_to_corners(self, box_center_unnorm, box_size, + box_angle): + box_center_upright = flip_axis_to_camera_tensor(box_center_unnorm) + boxes = get_3d_box_batch_tensor(box_size, box_angle, + box_center_upright) + return boxes + + def box_parametrization_to_corners_np(self, box_center_unnorm, box_size, + box_angle): + box_center_upright = flip_axis_to_camera_np(box_center_unnorm) + boxes = get_3d_box_batch_np(box_size, box_angle, box_center_upright) + return boxes + + @staticmethod + def rotate_aligned_boxes(input_boxes, rot_mat): + centers, lengths = input_boxes[:, 0:3], input_boxes[:, 3:6] + new_centers = np.dot(centers, np.transpose(rot_mat)) + + dx, dy = lengths[:, 0] / 2.0, lengths[:, 1] / 2.0 + new_x = np.zeros((dx.shape[0], 4)) + new_y = np.zeros((dx.shape[0], 4)) + + for i, crnr in enumerate([(-1, -1), (1, -1), (1, 1), (-1, 1)]): + crnrs = np.zeros((dx.shape[0], 3)) + crnrs[:, 0] = crnr[0] * dx + crnrs[:, 1] = crnr[1] * dy + crnrs = np.dot(crnrs, np.transpose(rot_mat)) + new_x[:, i] = crnrs[:, 0] + new_y[:, i] = crnrs[:, 1] + + new_dx = 2.0 * np.max(new_x, 1) + new_dy = 2.0 * np.max(new_y, 1) + new_lengths = np.stack((new_dx, new_dy, lengths[:, 2]), axis=1) + + return np.concatenate([new_centers, new_lengths], axis=1) + + +class ScanNetBaseDataset(Dataset): + + def __init__( + self, + args, + dataset_config, + split_set='train', + num_points=40000, + use_color=False, + use_normal=False, + use_multiview=False, + use_height=False, + augment=False, + use_random_cuboid=True, + random_cuboid_min_points=30000, + ): + + self.dataset_config = dataset_config + # assert split_set in ["train", "val"] + + root_dir = DATASET_ROOT_DIR + meta_data_dir = DATASET_METADATA_DIR + + self.data_path = root_dir + + self.num_points = num_points + self.use_color = use_color + 
+        self.use_normal = use_normal
+        self.use_multiview = use_multiview
+        self.use_height = use_height
+        self.augment = augment
+        self.use_random_cuboid = use_random_cuboid
+        self.random_cuboid_augmentor = RandomCuboid(
+            min_points=random_cuboid_min_points)
+        self.center_normalizing_range = [
+            np.zeros((1, 3), dtype=np.float32),
+            np.ones((1, 3), dtype=np.float32),
+        ]
+
+        self.multiview_data = {}
+
+    def __len__(self):
+        return len(self.scan_names)
+
+    def _get_scan_data(self, scan_name, input_bbox=None):
+
+        MAX_NUM_OBJ = self.dataset_config.max_num_obj
+
+        # points, colors, instance_labels = pcd_data[0], pcd_data[1], pcd_data[-1]
+        pcd_data = self.MMScan_loader.get_possess('point_clouds', scan_name)
+
+        mesh_vertices = np.concatenate((pcd_data[0], pcd_data[1]), axis=1)
+        instance_labels = pcd_data[-1]
+
+        # semantic labels are unused here; reuse the instance labels as a placeholder
+        semantic_labels = pcd_data[-1]
+
+        # TODO: do not simply truncate with [:6]; a proper conversion from the
+        # Euler-angle 9-DoF box would be correct here.
+        instance_bboxes = [
+            np.array(self.embodied_scan_box_info[scan_name][obj_id]['bbox'])
+            for obj_id in self.embodied_scan_box_info[scan_name]
+        ]
+        instance_bboxes = [
+            self.MMScan_loader.down_9DOF_to_6DOF(pcd_data, instance_bbox)
+            for instance_bbox in instance_bboxes
+        ]
+        instance_bboxes = np.stack(instance_bboxes)
+
+        if instance_bboxes.shape[0] > MAX_NUM_OBJ:
+            instance_bboxes = instance_bboxes[:MAX_NUM_OBJ, :]
+        if not self.use_color:
+            point_cloud = mesh_vertices[:, 0:3]  # do not use color for now
+            pcl_color = mesh_vertices[:, 3:6]
+        else:
+            point_cloud = mesh_vertices[:, 0:6]
+            # colors are stored in [0, 1], so rescale to 0-255 before mean-centring
+            point_cloud[:, 3:] = (point_cloud[:, 3:] * 256.0 -
+                                  MEAN_COLOR_RGB) / 256.0
+            pcl_color = point_cloud[:, 3:]
+
+        if self.use_normal:
+            normals = np.zeros(
+                mesh_vertices[:, 0:3].shape)  # mesh_vertices[:, 6:9].shape
+            point_cloud = np.concatenate([point_cloud, normals], 1)
+        assert point_cloud.size > 0
+
+        if self.use_multiview:
+            # load multiview database
+            pid = mp.current_process().pid
+            if pid not in self.multiview_data:
+                self.multiview_data[pid] = h5py.File(os.path.join(
+                    self.data_path, 'enet_feats_maxpool.hdf5'),
+                                                     'r',
+                                                     libver='latest')
+            multiview = self.multiview_data[pid][scan_name]
+            point_cloud = np.concatenate([point_cloud, multiview], 1)
+
+        # add a height feature (z minus an estimated floor height)
+        if self.use_height:
+            floor_height = np.percentile(point_cloud[:, 2], 0.99)
+            height = point_cloud[:, 2] - floor_height
+            point_cloud = np.concatenate(
+                [point_cloud, np.expand_dims(height, 1)], 1)
+
+        # ------------------------------- LABELS ------------------------------
+        target_bboxes = np.zeros((MAX_NUM_OBJ, 6), dtype=np.float32)
+        target_bboxes_mask = np.zeros((MAX_NUM_OBJ), dtype=np.float32)
+        angle_classes = np.zeros((MAX_NUM_OBJ, ), dtype=np.int64)
+        angle_residuals = np.zeros((MAX_NUM_OBJ, ), dtype=np.float32)
+        raw_sizes = np.zeros((MAX_NUM_OBJ, 3), dtype=np.float32)
+        raw_angles = np.zeros((MAX_NUM_OBJ, ), dtype=np.float32)
+        object_ids = np.zeros((MAX_NUM_OBJ, ), dtype=np.int64)
+
+        ### skip ###
+        if self.augment and self.use_random_cuboid:
+            (
+                point_cloud,
+                instance_bboxes,
+                per_point_labels,
+            ) = self.random_cuboid_augmentor(
+                point_cloud, instance_bboxes,
+                [instance_labels, semantic_labels])
+            instance_labels = per_point_labels[0]
+            semantic_labels = per_point_labels[1]
+        ### skip ###
+
+        point_cloud, choices = pc_util.random_sampling(point_cloud,
+                                                       self.num_points,
+                                                       return_choices=True)
+        instance_labels = instance_labels[choices]
+        semantic_labels = semantic_labels[choices]
+        pcl_color = pcl_color[choices]
+
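# Illustrative sketch (not part of the patch): `random_sampling(...,
# return_choices=True)` returns the kept indices, so any per-point array can be
# kept row-aligned by indexing with the same `choices`. Toy shapes below are
# assumptions:
#
#     pts = np.random.rand(100000, 3).astype(np.float32)
#     labels = np.random.randint(0, 18, size=100000)
#     pts_s, choices = pc_util.random_sampling(pts, 40000, return_choices=True)
#     labels_s = labels[choices]  # still row-aligned with pts_s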
+        target_bboxes_mask[0:instance_bboxes.shape[0]] = 1
+        target_bboxes[0:instance_bboxes.shape[0], :] = instance_bboxes[:, 0:6]
+
+        if input_bbox is not None:
+
+            input_bbox = np.array([np.array(t) for t in input_bbox])
+            input_bbox = np.array([
+                self.MMScan_loader.down_9DOF_to_6DOF(pcd_data, _bbox)
+                for _bbox in input_bbox
+            ])
+            if len(input_bbox.shape) == 1:
+                input_bbox = np.expand_dims(input_bbox, axis=0)
+
+        # augment: rotation and flip
+        if self.augment:
+
+            if np.random.random() > 0.5:
+                # Flipping along the YZ plane
+                point_cloud[:, 0] = -1 * point_cloud[:, 0]
+                target_bboxes[:, 0] = -1 * target_bboxes[:, 0]
+                if input_bbox is not None:
+                    input_bbox[:, 0] = -1 * input_bbox[:, 0]
+
+            if np.random.random() > 0.5:
+                # Flipping along the XZ plane
+                point_cloud[:, 1] = -1 * point_cloud[:, 1]
+                target_bboxes[:, 1] = -1 * target_bboxes[:, 1]
+                if input_bbox is not None:
+                    input_bbox[:, 1] = -1 * input_bbox[:, 1]
+
+            # Rotation along up-axis/Z-axis
+            rot_angle = (np.random.random() * np.pi /
+                         18) - np.pi / 36  # -5 ~ +5 degree
+            rot_mat = pc_util.rotz(rot_angle)
+            point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3],
+                                         np.transpose(rot_mat))
+            target_bboxes = self.dataset_config.rotate_aligned_boxes(
+                target_bboxes, rot_mat)
+            if input_bbox is not None:
+                input_bbox = self.dataset_config.rotate_aligned_boxes(
+                    input_bbox, rot_mat)
+
+        raw_sizes = target_bboxes[:, 3:6]
+
+        if input_bbox is not None:
+            embodied_scan_raw_sizes = input_bbox[:, 3:6]
+        point_cloud_dims_min = point_cloud[..., :3].min(axis=0)
+        point_cloud_dims_max = point_cloud[..., :3].max(axis=0)
+
+        box_centers = target_bboxes.astype(np.float32)[:, 0:3]
+        box_centers_normalized = shift_scale_points(
+            box_centers[None, ...],
+            src_range=[
+                point_cloud_dims_min[None, ...],
+                point_cloud_dims_max[None, ...],
+            ],
+            dst_range=self.center_normalizing_range,
+        )
+        box_centers_normalized = box_centers_normalized.squeeze(0)
+        box_centers_normalized = box_centers_normalized * target_bboxes_mask[
+            ..., None]
+
+        if input_bbox is not None:
+            embodied_scan_box_centers = input_bbox.astype(np.float32)[:, 0:3]
+            embodied_scan_box_centers_normalized = shift_scale_points(
+                embodied_scan_box_centers[None, ...],
+                src_range=[
+                    point_cloud_dims_min[None, ...],
+                    point_cloud_dims_max[None, ...],
+                ],
+                dst_range=self.center_normalizing_range,
+            )
+            embodied_scan_box_centers_normalized = embodied_scan_box_centers_normalized.squeeze(
+                0)
+
+        mult_factor = point_cloud_dims_max - point_cloud_dims_min
+        box_sizes_normalized = scale_points(
+            raw_sizes.astype(np.float32)[None, ...],
+            mult_factor=1.0 / mult_factor[None, ...],
+        )
+        box_sizes_normalized = box_sizes_normalized.squeeze(0)
+        if input_bbox is not None:
+            embodied_scan_box_sizes_normalized = scale_points(
+                embodied_scan_raw_sizes.astype(np.float32)[None, ...],
+                mult_factor=1.0 / mult_factor[None, ...],
+            )
+            embodied_scan_box_sizes_normalized = embodied_scan_box_sizes_normalized.squeeze(
+                0)
+
+        box_corners = self.dataset_config.box_parametrization_to_corners_np(
+            box_centers[None, ...],
+            raw_sizes.astype(np.float32)[None, ...],
+            raw_angles.astype(np.float32)[None, ...],
+        )
+        box_corners = box_corners.squeeze(0)
+        object_ids[:instance_bboxes.shape[0]] = instance_bboxes[:, -1]
+
+        if input_bbox is not None:
+            embodied_scan_box_corners = self.dataset_config.box_parametrization_to_corners_np(
+                embodied_scan_box_centers[None, ...],
+                embodied_scan_raw_sizes.astype(np.float32)[None, ...],
+                np.zeros((embodied_scan_box_centers.shape[0], ),
+                         dtype=np.float32)[None, ...],
+            )
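# Illustrative sketch (not part of the patch): with dst_range fixed to the unit
# cube in __init__, `shift_scale_points` reduces to a per-axis min-max rescale,
# roughly equivalent to:
#
#     def normalize_centers(centers, pc_min, pc_max):
#         # map scene coordinates into [0, 1], axis by axis
#         return (centers - pc_min) / (pc_max - pc_min)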
+            embodied_scan_box_corners = embodied_scan_box_corners.squeeze(0)
+
+        ret_dict = {}
+        if input_bbox is not None:
+            ret_dict['input_box_corners'] = embodied_scan_box_corners.astype(
+                np.float32)
+            ret_dict['input_box_centers'] = embodied_scan_box_centers.astype(
+                np.float32)
+            ret_dict[
+                'input_box_centers_normalized'] = embodied_scan_box_centers_normalized.astype(
+                    np.float32)
+            ret_dict[
+                'input_box_sizes_normalized'] = embodied_scan_box_sizes_normalized.astype(
+                    np.float32)
+        else:
+            ret_dict['input_box_corners'] = np.zeros(
+                (MAX_NUM_OBJ, 24)).astype(np.float32)
+            ret_dict['input_box_centers'] = np.zeros(
+                (MAX_NUM_OBJ, 3)).astype(np.float32)
+            # keep key names consistent with the branch above so batches collate
+            ret_dict['input_box_centers_normalized'] = np.zeros(
+                (MAX_NUM_OBJ, 3)).astype(np.float32)
+            ret_dict['input_box_sizes_normalized'] = np.zeros(
+                (MAX_NUM_OBJ, 3)).astype(np.float32)
+
+        ret_dict['point_clouds'] = point_cloud.astype(np.float32)
+        ret_dict['gt_box_corners'] = box_corners.astype(np.float32)
+        ret_dict['gt_box_centers'] = box_centers.astype(np.float32)
+        ret_dict['gt_box_centers_normalized'] = box_centers_normalized.astype(
+            np.float32)
+        ret_dict['gt_angle_class_label'] = angle_classes.astype(np.int64)
+        ret_dict['gt_angle_residual_label'] = angle_residuals.astype(
+            np.float32)
+        target_bboxes_semcls = np.zeros((MAX_NUM_OBJ))
+
+        ret_dict['gt_box_sem_cls_label'] = target_bboxes_semcls.astype(
+            np.int64)
+        ret_dict['gt_box_present'] = target_bboxes_mask.astype(np.float32)
+        ret_dict['pcl_color'] = pcl_color
+        ret_dict['gt_box_sizes'] = raw_sizes.astype(np.float32)
+        ret_dict['gt_box_sizes_normalized'] = box_sizes_normalized.astype(
+            np.float32)
+        ret_dict['gt_box_angles'] = raw_angles.astype(np.float32)
+        ret_dict['point_cloud_dims_min'] = point_cloud_dims_min.astype(
+            np.float32)
+        ret_dict['point_cloud_dims_max'] = point_cloud_dims_max.astype(
+            np.float32)
+        ret_dict['gt_object_ids'] = object_ids.astype(np.int64)
+
+        # compute votes *AFTER* augmentation
+        # generate votes
+        # Note: since there's no map between bbox instance labels and
+        # pc instance_labels (it had been filtered
+        # in the data preparation step) we'll compute the instance bbox
+        # from the points sharing the same instance label.
+
+        # point_votes = np.zeros([self.num_points, 3])
+        # point_votes_mask = np.zeros(self.num_points)
+        # for i_instance in np.unique(instance_labels):
+        #     # find all points belonging to that instance
+        #     ind = np.where(instance_labels == i_instance)[0]
+        #     # find the semantic label
+        #     if semantic_labels[ind[0]] in self.dataset_config.nyu40ids:
+        #         x = point_cloud[ind, :3]
+        #         center = 0.5 * (x.min(0) + x.max(0))
+        #         point_votes[ind, :] = center - x
+        #         point_votes_mask[ind] = 1.0
+        # point_votes = np.tile(point_votes, (1, 3))  # make 3 votes identical
+
+        # ret_dict['vote_label'] = point_votes.astype(np.float32)
+        # ret_dict['vote_label_mask'] = point_votes_mask.astype(np.int64)
+
+        return ret_dict
+
+    def __getitem__(self, idx):
+        scan_name = self.scan_names[idx]
+        ret_dict = self._get_scan_data(scan_name)
+        ret_dict['scan_idx'] = np.array(idx).astype(np.int64)
+        return ret_dict
diff --git a/models/LL3DA/datasets/task_prompts.py b/models/LL3DA/datasets/task_prompts.py
new file mode 100644
index 0000000..8e2b0b8
--- /dev/null
+++ b/models/LL3DA/datasets/task_prompts.py
@@ -0,0 +1,27 @@
+# The model can align generated text with the given boxes.
+
+TASK_PROPMT = {
+    'embodied_qa': {
+        'without_box':
+        dict(
+            instruction=
+            '### human: given the 3D scene, answer the question: "{question}" according to the given 3D scene. 
### assistant:', + answer='{answer}', + do_localize=False), + 'with_box': + dict( + instruction= + '### human: given the 3D scene, answer the question: "{question}" at "{locations}". ### assistant:', + answer='{answer}', + do_localize=False), + }, + 'embodied_cap': [ + dict( + instruction= + '### human: given the 3D scene, answer the question: "{question}" at "{locations}" ### assistant:', + answer='{answer}', + do_localize=False), + ] +} +BOX_FORMAT = '{}, {}, {}, {}, {}, {}' +COORD_FORMAT = '{}, {}' diff --git a/models/LL3DA/datasets/unified_embodied_scan_qa.py b/models/LL3DA/datasets/unified_embodied_scan_qa.py new file mode 100644 index 0000000..de35a4e --- /dev/null +++ b/models/LL3DA/datasets/unified_embodied_scan_qa.py @@ -0,0 +1,453 @@ +import json +import os +import os.path as osp +import pickle +import random +from copy import deepcopy +from glob import glob +from typing import Dict, List + +import numpy as np +import torch +import utils.pc_util as pc_util +from datasets.scannet_base_dataset import (BASE, DatasetConfig, + ScanNetBaseDataset) +from datasets.task_prompts import BOX_FORMAT, TASK_PROPMT +from eval_utils.evaluate_mmscan import evaluate +from transformers import AutoTokenizer + +MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8]) +from utils.box_util import (flip_axis_to_camera_np, flip_axis_to_camera_tensor, + get_3d_box_batch_np, get_3d_box_batch_tensor) +from utils.pc_util import scale_points, shift_scale_points + +from mmscan import MMScan + + +class Dataset(ScanNetBaseDataset): + + def __init__( + self, + args, + dataset_config, + split_set='train', + num_points=40000, + use_color=False, + use_normal=False, + use_multiview=False, + use_height=False, + augment=False, + ): + super().__init__( + args, + dataset_config, + split_set=split_set, + num_points=num_points, + use_color=use_color, + use_normal=use_normal, + use_multiview=use_multiview, + use_height=use_height, + augment=augment, + use_random_cuboid=False, + random_cuboid_min_points=None, + ) + + assert split_set in ['train', 'val'] + + # scannet base init + self.dataset_config = dataset_config + self.num_points = num_points + self.use_color = use_color + self.use_normal = use_normal + self.use_multiview = use_multiview + self.use_height = use_height + self.augment = augment + self.use_random_cuboid = False + self.split = split_set + + self.center_normalizing_range = [ + np.zeros((1, 3), dtype=np.float32), + np.ones((1, 3), dtype=np.float32), + ] + self.multiview_data = {} + + # MMScan QA task init + + self.task_name = 'embodied_qa' + self.grid_size_3d = args.grid_size_3d + self.max_prompts = args.max_prompts + self.dataset_config = dataset_config + self.max_des_len = args.max_des_len + + ## initialize tokenizer and set tokenizer's `padding token` to `eos token` + self.tokenizer = AutoTokenizer.from_pretrained(args.vocab, + add_bos_token=False) + self.tokenizer.pad_token = self.tokenizer.eos_token + self.tokenizer.padding_side = 'right' + self.qtokenizer = AutoTokenizer.from_pretrained(args.qformer_vocab) + self.qtokenizer.pad_token = self.tokenizer.eos_token + self.qtokenizer.padding_side = 'right' + + ## super configuration + self.tokenizer_config = dict(max_length=self.max_des_len, + padding='max_length', + truncation='longest_first', + return_tensors='np') + + # downsample for quick evaluation + self.MMScan_loader = MMScan(version='v1', split=split_set,\ + task='MMScan-QA', ratio = 0.1 if split_set=='val' else 1.0 ) + + # only need this for convenient evaluation + self.eval_func = evaluate + self.annotations = 
self.MMScan_loader.samples + + def __len__(self): + + return len(self.MMScan_loader) + + def __getitem__(self, idx): + + data_sample_dict = self.parse_dict(self.MMScan_loader[idx]) + + return data_sample_dict + + def parse_dict(self, data_dict) -> dict: + + idx = data_dict['index'] + task_name = self.task_name + question = data_dict['question'].lower() + + input_bboxes = [] + if data_dict['input_bboxes'] is not None: + assert len(data_dict['input_bboxes']) == len( + data_dict['input_bboxes_id']) + for bbox in data_dict['input_bboxes']: + input_bboxes.append(torch.tensor(bbox)) + # make it coordinated + input_bboxes = input_bboxes[:self.max_prompts] + ret_dict = self._get_scan_data(data_dict['ori_pcds'], + data_dict['bboxes'], + input_bbox=input_bboxes) + boxes = self._encode_box_coords(ret_dict) + else: + ret_dict = self._get_scan_data(data_dict['ori_pcds'], + data_dict['bboxes'], + input_bbox=None) + boxes = None + + task_key = 'with_box' if boxes is not None else 'without_box' + prompt = deepcopy(TASK_PROPMT[task_name][task_key]) + prompt['instruction'] = prompt['instruction'].format(locations=boxes, + question=question) + + # if data_dict["input_bboxes"] is not None: + # print(len(data_dict["input_bboxes"])) + # print(prompt['instruction']) + + if self.split == 'train': + caption = data_dict['answers'][0] + response = prompt['answer'].format(answer=caption) + else: + caption = '' + response = '' + + prompt_inputs = self.tokenizer.batch_encode_plus( + [prompt['instruction']], **self.tokenizer_config) + qformer_inputs = self.qtokenizer.batch_encode_plus( + [prompt['instruction']], **self.tokenizer_config) + + ## input_ids as labels for LLM + llm_inputs = self.tokenizer.batch_encode_plus([ + ' '.join( + (prompt['instruction'], response, self.tokenizer.eos_token)) + ], **self.tokenizer_config) + + box_query = np.zeros((self.max_prompts, 8, 3)) + box_mask = np.zeros((self.max_prompts, )) + click_query = np.zeros((self.max_prompts, 3)) + click_mask = np.zeros((self.max_prompts, )) + + if data_dict['input_bboxes'] is not None: + if random.random() > 0.5: + # use box to identify an object + for _index in range(len(input_bboxes)): + box_query[_index] = ret_dict['input_box_corners'][ + _index].reshape(8, 3).astype(np.float32) + box_mask[_index] = 1 + else: + # use click to identify an object + for _index in range(len(input_bboxes)): + click_query[_index] = ret_dict['input_box_centers'][ + _index].reshape(3, ).astype(np.float32) + click_mask[_index] = 1 + else: + box_query = np.zeros((self.max_prompts, 8, 3)) + box_mask = np.zeros((self.max_prompts, )) + click_query = np.zeros((self.max_prompts, 3)) + click_mask = np.zeros((self.max_prompts, )) + + ret_dict['box_query'] = box_query.astype(np.float32) + ret_dict['box_mask'] = box_mask.astype(np.float32) + ret_dict['click_query'] = click_query.astype(np.float32) + ret_dict['click_mask'] = click_mask.astype(np.float32) + + ret_dict['input_ids'] = llm_inputs['input_ids'][0].astype(np.int64) + ret_dict['attention_mask'] = llm_inputs['attention_mask'][0].astype( + np.float32) + ret_dict['gradient_mask'] = ( + llm_inputs['attention_mask'][0] - + prompt_inputs['attention_mask'][0]).astype(np.float32) + + ret_dict['scan_idx'] = np.array(idx).astype(np.int64) + ret_dict['instruction'] = prompt_inputs['input_ids'][0].astype( + np.int64) + ret_dict['instruction_mask'] = prompt_inputs['attention_mask'][ + 0].astype(np.float32) + ret_dict['qformer_input_ids'] = qformer_inputs['input_ids'][0].astype( + np.int64) + ret_dict['qformer_attention_mask'] = 
qformer_inputs['attention_mask'][ + 0].astype(np.float32) + + keys_to_remove = [k for k in ret_dict.keys() if 'input_box' in str(k)] + for k in keys_to_remove: + ret_dict.pop(k) + return ret_dict + + def _encode_box_coords(self, ret_dict): + + # TODO: output the pcd and the box info here to check if they are match, ensure that's correct + + center_normalized = ret_dict['input_box_centers_normalized'] + size_normalized = ret_dict['input_box_sizes_normalized'] + box_normalized = np.hstack( + (center_normalized, size_normalized)) # (-1, 6) + # + box_normalized = (box_normalized * self.grid_size_3d).astype(np.int64) + return ' '.join(BOX_FORMAT.format(*box) for box in box_normalized) + + def _get_scan_data(self, ori_pcds, data_bboxes, input_bbox=None): + + MAX_NUM_OBJ = self.dataset_config.max_num_obj + + # points, colors, instance_labels = pcd_data[0], pcd_data[1], pcd_data[-1] + pcd_data = ori_pcds + + mesh_vertices = np.concatenate((pcd_data[0], pcd_data[1]), axis=1) + instance_labels = pcd_data[-1] + + # semantic is no use + semantic_labels = pcd_data[-1] + + #try: + instance_bboxes = [ + np.array(data_bboxes[obj_id]['bbox']) for obj_id in data_bboxes + ] + instance_bboxes = [ + instance_bbox[:6] for instance_bbox in instance_bboxes + ] + instance_bboxes = np.stack(instance_bboxes) + # except: + # print([np.array(data_bboxes[obj_id]["bbox"]).shape for obj_id in data_bboxes]) + # print([instance_bbox.shape for instance_bbox in instance_bboxes]) + # import json + # with open("write_log.json","w") as f: + # json.dump(str([np.array(data_bboxes[obj_id]["bbox"]).shape for obj_id in data_bboxes])+str([instance_bbox.shape for instance_bbox in instance_bboxes]),f) + + if instance_bboxes.shape[0] > MAX_NUM_OBJ: + instance_bboxes = instance_bboxes[:MAX_NUM_OBJ, :] + if not self.use_color: + point_cloud = mesh_vertices[:, 0:3] # do not use color for now + pcl_color = mesh_vertices[:, 3:6] + else: + point_cloud = mesh_vertices[:, 0:6] + # skip the div process + point_cloud[:, 3:] = (point_cloud[:, 3:] * 256.0 - + MEAN_COLOR_RGB) / 256.0 + pcl_color = point_cloud[:, 3:] + + if self.use_normal: + normals = np.zeros( + mesh_vertices[:, 0:3].shape) #mesh_vertices[:,6:9].shape + point_cloud = np.concatenate([point_cloud, normals], 1) + assert point_cloud.size > 0 + + # adding a height-feature + if self.use_height: + floor_height = np.percentile(point_cloud[:, 2], 0.99) + height = point_cloud[:, 2] - floor_height + point_cloud = np.concatenate( + [point_cloud, np.expand_dims(height, 1)], 1) + + # ------------------------------- LABELS ------------------------------ + target_bboxes = np.zeros((MAX_NUM_OBJ, 6), dtype=np.float32) + target_bboxes_mask = np.zeros((MAX_NUM_OBJ), dtype=np.float32) + angle_classes = np.zeros((MAX_NUM_OBJ, ), dtype=np.int64) + angle_residuals = np.zeros((MAX_NUM_OBJ, ), dtype=np.float32) + raw_sizes = np.zeros((MAX_NUM_OBJ, 3), dtype=np.float32) + raw_angles = np.zeros((MAX_NUM_OBJ, ), dtype=np.float32) + object_ids = np.zeros((MAX_NUM_OBJ, ), dtype=np.int64) + + point_cloud, choices = pc_util.random_sampling(point_cloud, + self.num_points, + return_choices=True) + instance_labels = instance_labels[choices] + semantic_labels = semantic_labels[choices] + pcl_color = pcl_color[choices] + + target_bboxes_mask[0:instance_bboxes.shape[0]] = 1 + target_bboxes[0:instance_bboxes.shape[0], :] = instance_bboxes[:, 0:6] + + if input_bbox is not None: + + input_bbox = np.array([np.array(t) for t in input_bbox]) + input_bbox = np.array([_bbox[:6] for _bbox in input_bbox]) + if 
len(input_bbox.shape) == 1:
+                input_bbox = np.expand_dims(input_bbox, axis=0)
+
+        # augment: rotation and flip
+        if self.augment:
+
+            if np.random.random() > 0.5:
+                # Flipping along the YZ plane
+                point_cloud[:, 0] = -1 * point_cloud[:, 0]
+                target_bboxes[:, 0] = -1 * target_bboxes[:, 0]
+                if input_bbox is not None:
+                    input_bbox[:, 0] = -1 * input_bbox[:, 0]
+
+            if np.random.random() > 0.5:
+                # Flipping along the XZ plane
+                point_cloud[:, 1] = -1 * point_cloud[:, 1]
+                target_bboxes[:, 1] = -1 * target_bboxes[:, 1]
+                if input_bbox is not None:
+                    input_bbox[:, 1] = -1 * input_bbox[:, 1]
+
+            # Rotation along up-axis/Z-axis
+            rot_angle = (np.random.random() * np.pi /
+                         18) - np.pi / 36  # -5 ~ +5 degree
+            rot_mat = pc_util.rotz(rot_angle)
+            point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3],
+                                         np.transpose(rot_mat))
+            target_bboxes = self.dataset_config.rotate_aligned_boxes(
+                target_bboxes, rot_mat)
+            if input_bbox is not None:
+                input_bbox = self.dataset_config.rotate_aligned_boxes(
+                    input_bbox, rot_mat)
+
+        raw_sizes = target_bboxes[:, 3:6]
+
+        if input_bbox is not None:
+            embodied_scan_raw_sizes = input_bbox[:, 3:6]
+
+        point_cloud_dims_min = point_cloud[..., :3].min(axis=0)
+        point_cloud_dims_max = point_cloud[..., :3].max(axis=0)
+
+        box_centers = target_bboxes.astype(np.float32)[:, 0:3]
+        box_centers_normalized = shift_scale_points(
+            box_centers[None, ...],
+            src_range=[
+                point_cloud_dims_min[None, ...],
+                point_cloud_dims_max[None, ...],
+            ],
+            dst_range=self.center_normalizing_range,
+        )
+        box_centers_normalized = box_centers_normalized.squeeze(0)
+        box_centers_normalized = box_centers_normalized * target_bboxes_mask[
+            ..., None]
+
+        if input_bbox is not None:
+            embodied_scan_box_centers = input_bbox.astype(np.float32)[:, 0:3]
+            embodied_scan_box_centers_normalized = shift_scale_points(
+                embodied_scan_box_centers[None, ...],
+                src_range=[
+                    point_cloud_dims_min[None, ...],
+                    point_cloud_dims_max[None, ...],
+                ],
+                dst_range=self.center_normalizing_range,
+            )
+            embodied_scan_box_centers_normalized = embodied_scan_box_centers_normalized.squeeze(
+                0)
+
+        mult_factor = point_cloud_dims_max - point_cloud_dims_min
+        box_sizes_normalized = scale_points(
+            raw_sizes.astype(np.float32)[None, ...],
+            mult_factor=1.0 / mult_factor[None, ...],
+        )
+        box_sizes_normalized = box_sizes_normalized.squeeze(0)
+        if input_bbox is not None:
+            embodied_scan_box_sizes_normalized = scale_points(
+                embodied_scan_raw_sizes.astype(np.float32)[None, ...],
+                mult_factor=1.0 / mult_factor[None, ...],
+            )
+            embodied_scan_box_sizes_normalized = embodied_scan_box_sizes_normalized.squeeze(
+                0)
+
+        box_corners = self.dataset_config.box_parametrization_to_corners_np(
+            box_centers[None, ...],
+            raw_sizes.astype(np.float32)[None, ...],
+            raw_angles.astype(np.float32)[None, ...],
+        )
+        box_corners = box_corners.squeeze(0)
+        object_ids[:instance_bboxes.shape[0]] = instance_bboxes[:, -1]
+
+        if input_bbox is not None:
+            embodied_scan_box_corners = self.dataset_config.box_parametrization_to_corners_np(
+                embodied_scan_box_centers[None, ...],
+                embodied_scan_raw_sizes.astype(np.float32)[None, ...],
+                np.zeros((embodied_scan_box_centers.shape[0], ),
+                         dtype=np.float32)[None, ...],
+            )
+            embodied_scan_box_corners = embodied_scan_box_corners.squeeze(0)
+
+        ret_dict = {}
+        if input_bbox is not None:
+            ret_dict['input_box_corners'] = embodied_scan_box_corners.astype(
+                np.float32)
+            ret_dict['input_box_centers'] = embodied_scan_box_centers.astype(
+                np.float32)
+            ret_dict[
+                'input_box_centers_normalized'] = 
embodied_scan_box_centers_normalized.astype( + np.float32) + ret_dict[ + 'input_box_sizes_normalized'] = embodied_scan_box_sizes_normalized.astype( + np.float32) + else: + ret_dict['input_box_corners'] = np.zeros( + (MAX_NUM_OBJ, 24)).astype(np.float32) + ret_dict['input_box_centers'] = np.zeros( + (MAX_NUM_OBJ, 3)).astype(np.float32) + ret_dict['input_box_centers_normalized'] = np.zeros( + (MAX_NUM_OBJ, 3)).astype(np.float32) + ret_dict['input_box_sizes_normalized'] = np.zeros( + (MAX_NUM_OBJ, 3)).astype(np.float32) + + ret_dict['point_clouds'] = point_cloud.astype(np.float32) + ret_dict['gt_box_corners'] = box_corners.astype(np.float32) + ret_dict['gt_box_centers'] = box_centers.astype(np.float32) + ret_dict['gt_box_centers_normalized'] = box_centers_normalized.astype( + np.float32) + ret_dict['gt_angle_class_label'] = angle_classes.astype(np.int64) + ret_dict['gt_angle_residual_label'] = angle_residuals.astype( + np.float32) + target_bboxes_semcls = np.zeros((MAX_NUM_OBJ)) + + ret_dict['gt_box_sem_cls_label'] = target_bboxes_semcls.astype( + np.int64) + ret_dict['gt_box_present'] = target_bboxes_mask.astype(np.float32) + ret_dict['pcl_color'] = pcl_color + ret_dict['gt_box_sizes'] = raw_sizes.astype(np.float32) + ret_dict['gt_box_sizes_normalized'] = box_sizes_normalized.astype( + np.float32) + ret_dict['gt_box_angles'] = raw_angles.astype(np.float32) + ret_dict['point_cloud_dims_min'] = point_cloud_dims_min.astype( + np.float32) + ret_dict['point_cloud_dims_max'] = point_cloud_dims_max.astype( + np.float32) + ret_dict['gt_object_ids'] = object_ids.astype(np.int64) + + return ret_dict + + +if __name__ == '__main__': + test = Dataset('', DatasetConfig()) + print(len(test)) diff --git a/models/LL3DA/engine.py b/models/LL3DA/engine.py new file mode 100644 index 0000000..3a1cdac --- /dev/null +++ b/models/LL3DA/engine.py @@ -0,0 +1,219 @@ +import datetime +import importlib +import json +import math +import os +import sys +import time +import tracemalloc +from collections import OrderedDict, defaultdict + +import torch +import utils.capeval.bleu.bleu as capblue +import utils.capeval.cider.cider as capcider +import utils.capeval.meteor.meteor as capmeteor +import utils.capeval.rouge.rouge as caprouge +from utils.ap_calculator import APCalculator +from utils.box_util import box3d_iou_batch_tensor +from utils.dist import (all_gather_dict, all_reduce_average, barrier, get_rank, + init_distributed, is_distributed, is_primary) +from utils.io import save_checkpoint +from utils.misc import SmoothedValue +from utils.proposal_parser import parse_predictions + + +class Logger: + + def __init__(self, args): + exp_name = os.path.split(args.checkpoint_dir)[-1] + self.logger = open( + os.path.join(args.checkpoint_dir, f'{exp_name}-logger.log'), 'a') + + def __call__(self, info_str): + self.logger.write(info_str + '\n') + self.logger.flush() + print(info_str) + + +def compute_learning_rate(args, curr_epoch_normalized): + assert curr_epoch_normalized <= 1.0 and curr_epoch_normalized >= 0.0 + if (curr_epoch_normalized <= (args.warm_lr_epochs / args.max_epoch) + and args.warm_lr_epochs > 0): + # Linear Warmup + curr_lr = args.warm_lr + curr_epoch_normalized * args.max_epoch * ( + (args.base_lr - args.warm_lr) / args.warm_lr_epochs) + else: + # Cosine Learning Rate Schedule + curr_lr = args.final_lr + 0.5 * (args.base_lr - args.final_lr) * ( + 1 + math.cos(math.pi * curr_epoch_normalized)) + return curr_lr + + +def adjust_learning_rate(args, optimizer, curr_epoch): + curr_lr = compute_learning_rate(args, 
curr_epoch)
+    for param_group in optimizer.param_groups:
+        param_group['lr'] = curr_lr
+    return curr_lr
+
+
+def do_train(args,
+             model,
+             model_no_ddp,
+             optimizer,
+             dataset_config,
+             dataloaders,
+             best_val_metrics=dict()):
+
+    logout = Logger(args)
+
+    if is_primary():
+        tracemalloc.start()
+        logout(f'call with args: {args}')
+        logout(f'{model}')
+
+    curr_iter = args.start_epoch * len(dataloaders['train'])
+    max_iters = args.max_epoch * len(dataloaders['train'])
+    net_device = next(model.parameters()).device
+
+    time_delta = SmoothedValue(window_size=10)
+    loss_avg = SmoothedValue(window_size=10)
+
+    model.train()
+    barrier()
+
+    max_tolerant_nan = 4
+    curr_nan_times = 0
+
+    for curr_epoch in range(args.start_epoch, args.max_epoch):
+
+        if is_distributed():
+            dataloaders['train_sampler'].set_epoch(curr_epoch)
+
+        for batch_idx, batch_data_label in enumerate(dataloaders['train']):
+
+            curr_time = time.time()
+
+            curr_iter = curr_epoch * len(dataloaders['train']) + batch_idx
+            curr_lr = adjust_learning_rate(args, optimizer,
+                                           curr_iter / max_iters)
+            for key in batch_data_label:
+                batch_data_label[key] = batch_data_label[key].to(net_device)
+
+            # Forward pass
+            optimizer.zero_grad()
+
+            outputs = model(batch_data_label, is_eval=False)
+            loss = outputs['loss']
+            loss = all_reduce_average(loss)
+
+            if not math.isfinite(loss.item()):
+                if curr_nan_times < max_tolerant_nan:
+                    logout('Loss is not finite. Skip this training step.')
+                    curr_nan_times += 1
+                    continue
+                else:
+                    logout('Loss is not finite. Terminate training.')
+                    exit(-1)
+            curr_nan_times = 0
+
+            loss.backward()
+            if args.clip_gradient > 0:
+                torch.nn.utils.clip_grad_norm_(model.parameters(),
+                                               args.clip_gradient)
+            optimizer.step()
+
+            time_delta.update(time.time() - curr_time)
+            loss_avg.update(loss.item())
+            if is_primary():
+                current, peak = tracemalloc.get_traced_memory()
+                with open('log_cnt.json', 'a') as f:
+                    json.dump(
+                        f'Current memory usage is {current / 10**6}MB; Peak was {peak / 10**6}MB\n',
+                        f)
+
+            # logging
+            if is_primary() and curr_iter % args.log_every == 0:
+                mem_mb = torch.cuda.max_memory_allocated() / (1024**2)
+                eta_seconds = (max_iters - curr_iter) * time_delta.avg
+                eta_str = str(datetime.timedelta(seconds=int(eta_seconds)))
+                logout(f'Epoch [{curr_epoch}/{args.max_epoch}]; '
+                       f'Iter [{curr_iter}/{max_iters}]; '
+                       f'Loss {loss_avg.avg:0.2f}; '
+                       f'LR {curr_lr:0.2e}; Iter time {time_delta.avg:0.2f}; '
+                       f'ETA {eta_str}; Mem {mem_mb:0.2f}MB')
+
+            barrier()
+            # save ckpt
+            if is_primary() and (curr_iter + 1) % args.save_every == 0:
+                save_checkpoint(
+                    args.checkpoint_dir,
+                    model_no_ddp,
+                    optimizer,
+                    curr_epoch,
+                    args,
+                    best_val_metrics,
+                    filename=f'checkpoint_{(curr_iter + 1) // 1000}k.pth',
+                )
+
+            # eval
+            if (curr_iter + 1) % args.eval_every_iteration == 0 \
+                    and (curr_iter + 1) > args.start_eval_after:
+
+                eval_metrics = {}
+                model.eval()
+                for test_loader in dataloaders['test']:
+                    task_metrics = test_loader.dataset.eval_func(
+                        args,
+                        curr_epoch,
+                        model,
+                        dataset_config,
+                        test_loader,
+                        logout,
+                        curr_train_iter=curr_iter)
+                    eval_metrics.update(task_metrics)
+                model.train()
+
+                if not best_val_metrics or (best_val_metrics[args.criterion] <
+                                            eval_metrics[args.criterion]):
+                    best_val_metrics = eval_metrics
+                    filename = 'checkpoint_best.pth'
+                    save_checkpoint(
+                        args.checkpoint_dir,
+                        model_no_ddp,
+                        optimizer,
+                        curr_epoch,
+                        args,
+                        best_val_metrics,
+                        filename='checkpoint_best.pth',
+                    )
+                    if is_primary():
+                        logout(
+                            f'Epoch [{curr_epoch}/{args.max_epoch}] '
+                            f'saved current best val 
checkpoint at {filename}; ' + f'{args.criterion} {eval_metrics[args.criterion]}') + # end of an iteration + + # end of an epoch + save_checkpoint( + args.checkpoint_dir, + model_no_ddp, + optimizer, + curr_epoch, + args, + best_val_metrics, + filename='checkpoint.pth', + ) + + # end of training + eval_metrics = {} + model.eval() + for test_loader in dataloaders['test']: + task_metrics = test_loader.dataset.eval_func(args, + curr_epoch, + model, + dataset_config, + test_loader, + logout, + curr_train_iter=curr_iter) + eval_metrics.update(task_metrics) + return diff --git a/models/LL3DA/eval_utils/evaluate_densecap.py b/models/LL3DA/eval_utils/evaluate_densecap.py new file mode 100644 index 0000000..3152416 --- /dev/null +++ b/models/LL3DA/eval_utils/evaluate_densecap.py @@ -0,0 +1,276 @@ +import json +import os +import time +from collections import OrderedDict, defaultdict + +import torch +import utils.capeval.bleu.bleu as capblue +import utils.capeval.cider.cider as capcider +import utils.capeval.meteor.meteor as capmeteor +import utils.capeval.rouge.rouge as caprouge +from utils.box_util import box3d_iou_batch_tensor +from utils.dist import all_gather_dict, barrier, is_primary +from utils.misc import SmoothedValue +from utils.proposal_parser import parse_predictions + + +def score_captions(corpus: dict, candidates: dict): + + bleu = capblue.Bleu(4).compute_score(corpus, candidates) + cider = capcider.Cider().compute_score(corpus, candidates) + rouge = caprouge.Rouge().compute_score(corpus, candidates) + meteor = capmeteor.Meteor().compute_score(corpus, candidates) + + score_per_caption = { + 'bleu-1': [float(s) for s in bleu[1][0]], + 'bleu-2': [float(s) for s in bleu[1][1]], + 'bleu-3': [float(s) for s in bleu[1][2]], + 'bleu-4': [float(s) for s in bleu[1][3]], + 'cider': [float(s) for s in cider[1]], + 'rouge': [float(s) for s in rouge[1]], + 'meteor': [float(s) for s in meteor[1]], + } + + message = '\n'.join([ + '[BLEU-1] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][0], max(bleu[1][0]), min(bleu[1][0])), + '[BLEU-2] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][1], max(bleu[1][1]), min(bleu[1][1])), + '[BLEU-3] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][2], max(bleu[1][2]), min(bleu[1][2])), + '[BLEU-4] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][3], max(bleu[1][3]), min(bleu[1][3])), + '[CIDEr] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + cider[0], max(cider[1]), min(cider[1])), + '[ROUGE-L] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + rouge[0], max(rouge[1]), min(rouge[1])), + '[METEOR] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + meteor[0], max(meteor[1]), min(meteor[1])) + ]) + + eval_metric = { + 'BLEU-4': bleu[0][3], + 'CiDEr': cider[0], + 'Rouge': rouge[0], + 'METEOR': meteor[0], + } + return score_per_caption, message, eval_metric + + +def prepare_corpus(raw_data, max_len: int = 30) -> dict: + # helper function to prepare ground truth captions + corpus = defaultdict(list) + object_id_to_name = defaultdict(lambda: 'unknown') + + for data in raw_data: + + (scene_id, object_id, object_name + ) = data['scene_id'], data['object_id'], data['object_name'] + + # parse language tokens + token = data['token'][:max_len] + description = ' '.join(['sos'] + token + ['eos']) + key = f'{scene_id}|{object_id}|{object_name}' + object_id_to_name[f'{scene_id}|{object_id}'] = object_name + + corpus[key].append(description) + + return corpus, object_id_to_name + + +@torch.no_grad() +def evaluate( + args, + curr_epoch, + model, + 
dataset_config, + dataset_loader, + logout=print, + curr_train_iter=-1, +): + + # prepare ground truth caption labels + print('preparing corpus...') + scene_list = dataset_loader.dataset.scan_names + corpus, object_id_to_name = prepare_corpus( + dataset_loader.dataset.scanrefer) + task_name = dataset_loader.dataset.task_name + ### initialize and prepare for evaluation + tokenizer = dataset_loader.dataset.tokenizer + net_device = next(model.parameters()).device + num_batches = len(dataset_loader) + + time_delta = SmoothedValue(window_size=10) + + model.eval() + barrier() + + epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else '' + + candidates = {'caption': OrderedDict({}), 'iou': defaultdict(float)} + + for curr_iter, batch_data_label in enumerate(dataset_loader): + + curr_time = time.time() + for key in batch_data_label: + batch_data_label[key] = batch_data_label[key].to(net_device) + + model_input = { + 'point_clouds': batch_data_label['point_clouds'], + 'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'], + 'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'], + 'instruction': batch_data_label['instruction'], + 'instruction_mask': batch_data_label['instruction_mask'], + 'qformer_input_ids': batch_data_label['qformer_input_ids'], + 'qformer_attention_mask': + batch_data_label['qformer_attention_mask'], + } + outputs = model(model_input, is_eval=True, task_name='dense-cap') + + outputs = dict( + box_corners=outputs['box_corners'], + sem_cls_prob=outputs['sem_cls_prob'], + objectness_prob=outputs['objectness_prob'], + output_ids=outputs['output_ids'], + sem_cls_logits=outputs['sem_cls_logits'], + ) + + outputs = all_gather_dict(outputs) + batch_data_label = all_gather_dict(batch_data_label) + + ### match objects + batch_size, MAX_NUM_OBJ, _, _ = batch_data_label[ + 'gt_box_corners'].shape + _, nqueries, _, _ = outputs['box_corners'].shape + + match_box_ious = box3d_iou_batch_tensor( # batch, nqueries, MAX_NUM_OBJ + (outputs['box_corners'].unsqueeze(2).repeat( + 1, 1, MAX_NUM_OBJ, 1, 1).view(-1, 8, 3)), + (batch_data_label['gt_box_corners'].unsqueeze(1).repeat( + 1, nqueries, 1, 1, 1).view(-1, 8, 3))).view( + batch_size, nqueries, MAX_NUM_OBJ) + match_box_ious, match_box_idxs = match_box_ious.max( + -1) # batch, nqueries + match_box_idxs = torch.gather(batch_data_label['gt_object_ids'], 1, + match_box_idxs) # batch, nqueries + + # ---- Checkout bounding box ious and semantic logits + good_bbox_masks = match_box_ious > args.test_min_iou # batch, nqueries + good_bbox_masks &= outputs['sem_cls_logits'].argmax(-1) != ( + outputs['sem_cls_logits'].shape[-1] - 1) + + # ---- add nms to get accurate predictions + nms_bbox_masks = parse_predictions( # batch x nqueries + outputs['box_corners'], outputs['sem_cls_prob'], + outputs['objectness_prob'], batch_data_label['point_clouds']) + nms_bbox_masks = torch.from_numpy(nms_bbox_masks).long() == 1 + good_bbox_masks &= nms_bbox_masks.to(good_bbox_masks.device) + + good_bbox_masks = good_bbox_masks.cpu().tolist() + + output_ids = outputs['output_ids'] # batch x nqueries x max_length + captions = tokenizer.batch_decode(output_ids.reshape( + -1, output_ids.shape[-1]), + skip_special_tokens=True, + clean_up_tokenization_spaces=False) + captions = [ + [ + ('sos ' + captions[batch_id * nqueries + prop_id] + ' eos').replace(' ', ' ') \ + for prop_id in range(nqueries) + ] \ + for batch_id in range(batch_size) + ] + + match_box_idxs = match_box_idxs.cpu().tolist() + match_box_ious = match_box_ious.cpu().tolist() + ### 
calculate measurable indicators on captions
+        for idx, scene_id in enumerate(
+                batch_data_label['scan_idx'].cpu().tolist()):
+            scene_name = scene_list[scene_id]
+            for prop_id in range(nqueries):
+
+                if good_bbox_masks[idx][prop_id] is False:
+                    continue
+
+                match_obj_id = match_box_idxs[idx][prop_id]
+                match_obj_iou = match_box_ious[idx][prop_id]
+
+                object_name = object_id_to_name[f'{scene_name}|{match_obj_id}']
+                key = f'{scene_name}|{match_obj_id}|{object_name}'
+
+                if match_obj_iou > candidates['iou'][key]:
+                    candidates['iou'][key] = match_obj_iou
+                    candidates['caption'][key] = [captions[idx][prop_id]]
+                # DEBUG: checkout how many matched bounding boxes
+                # candidates[key] = ["this is a valid match!"]
+
+        # Memory intensive as it gathers point cloud GT tensor across all ranks
+        time_delta.update(time.time() - curr_time)
+
+        if is_primary() and curr_iter % args.log_every == 0:
+            mem_mb = torch.cuda.max_memory_allocated() / (1024**2)
+            logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; '
+                   f'Evaluating on iter: {curr_train_iter}; '
+                   f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB')
+        barrier()
+    # end of forward-pass traversal
+
+    ### message out
+    missing_proposals = len(corpus.keys() - candidates['caption'].keys())
+    total_captions = len(corpus.keys())
+
+    ### make up placeholders for undetected bounding boxes
+    for missing_key in (corpus.keys() - candidates['caption'].keys()):
+        candidates['caption'][missing_key] = ['sos eos']
+
+    # find annotated objects in scanrefer
+    candidates = OrderedDict([
+        (key, value) for key, value in sorted(candidates['caption'].items()) \
+        if not key.endswith('unknown')
+    ])
+    score_per_caption, message, eval_metric = score_captions(
+        OrderedDict([(key, corpus[key]) for key in candidates]), candidates)
+
+    if is_primary():
+        logout(f'\n----------------------Evaluation-----------------------\n'
+               f'INFO: iou@{args.test_min_iou} matched proposals: '
+               f'[{total_captions - missing_proposals} / {total_captions}], ')
+        logout(message)
+
+        with open(
+                os.path.join(args.checkpoint_dir,
+                             task_name + '_densecap_corpus_val.json'),
+                'w') as f:
+            json.dump(corpus, f, indent=4)
+
+        with open(
+                os.path.join(args.checkpoint_dir,
+                             task_name + '_densecap_pred_val.json'), 'w') as f:
+            json.dump(candidates, f, indent=4)
+
+        with open(
+                os.path.join(args.checkpoint_dir,
+                             task_name + '_densecap_pred_gt_val.json'),
+                'w') as f:
+            pred_gt_val = {}
+            for scene_object_id, scene_object_id_key in enumerate(candidates):
+                pred_gt_val[scene_object_id_key] = {
+                    'pred': candidates[scene_object_id_key],
+                    'gt': corpus[scene_object_id_key],
+                    'score': {
+                        'bleu-1': score_per_caption['bleu-1'][scene_object_id],
+                        'bleu-2': score_per_caption['bleu-2'][scene_object_id],
+                        'bleu-3': score_per_caption['bleu-3'][scene_object_id],
+                        'bleu-4': score_per_caption['bleu-4'][scene_object_id],
+                        'CiDEr': score_per_caption['cider'][scene_object_id],
+                        'rouge': score_per_caption['rouge'][scene_object_id],
+                        'meteor': score_per_caption['meteor'][scene_object_id]
+                    }
+                }
+            json.dump(pred_gt_val, f, indent=4)
+
+    eval_metrics = {
+        metric + f'@{args.test_min_iou}': score \
+        for metric, score in eval_metric.items()
+    }
+    return eval_metrics
diff --git a/models/LL3DA/eval_utils/evaluate_det.py b/models/LL3DA/eval_utils/evaluate_det.py
new file mode 100644
index 0000000..4f0ec5a
--- /dev/null
+++ b/models/LL3DA/eval_utils/evaluate_det.py
@@ -0,0 +1,87 @@
+import time
+
+import torch
+from utils.ap_calculator import APCalculator
+from utils.dist import all_gather_dict, 
barrier, is_primary +from utils.misc import SmoothedValue + + +@torch.no_grad() +def evaluate( + args, + curr_epoch, + model, + dataset_config, + dataset_loader, + logout=print, + curr_train_iter=-1, +): + + # ap calculator is exact for evaluation. + # This is slower than the ap calculator used during training. + ap_calculator = APCalculator( + dataset_config=dataset_config, + ap_iou_thresh=[0.25, 0.5], + class2type_map=dataset_config.class2type, + exact_eval=True, + ) + + net_device = next(model.parameters()).device + num_batches = len(dataset_loader) + + time_delta = SmoothedValue(window_size=10) + + model.eval() + barrier() + + epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else '' + + for curr_iter, batch_data_label in enumerate(dataset_loader): + + curr_time = time.time() + for key in batch_data_label: + batch_data_label[key] = batch_data_label[key].to(net_device) + + model_input = { + 'point_clouds': batch_data_label['point_clouds'], + 'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'], + 'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'], + } + outputs = model(model_input, is_eval=True) + + outputs = dict( + box_corners=outputs['box_corners'], + sem_cls_prob=outputs['sem_cls_prob'], + objectness_prob=outputs['objectness_prob'], + point_clouds=batch_data_label['point_clouds'], + gt_box_corners=batch_data_label['gt_box_corners'], + gt_box_sem_cls_label=batch_data_label['gt_box_sem_cls_label'], + gt_box_present=batch_data_label['gt_box_present'], + ) + outputs = all_gather_dict(outputs) + batch_data_label = all_gather_dict(batch_data_label) + + # Memory intensive as it gathers point cloud GT tensor across all ranks + ap_calculator.step_meter({'outputs': outputs}, batch_data_label) + time_delta.update(time.time() - curr_time) + + if is_primary() and curr_iter % args.log_every == 0: + mem_mb = torch.cuda.max_memory_allocated() / (1024**2) + logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; ' + f'Evaluating on iter: {curr_train_iter}; ' + f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB') + barrier() + metrics = ap_calculator.compute_metrics() + metric_str = ap_calculator.metrics_to_str(metrics, per_class=True) + + if is_primary(): + logout('==' * 10) + logout(f'Evaluate Epoch [{curr_epoch}/{args.max_epoch}]') + logout(f'{metric_str}') + logout('==' * 10) + + eval_metrics = { + metric + f'@{args.test_min_iou}': score \ + for metric, score in metrics[args.test_min_iou].items() + } + return eval_metrics diff --git a/models/LL3DA/eval_utils/evaluate_dialogue.py b/models/LL3DA/eval_utils/evaluate_dialogue.py new file mode 100644 index 0000000..a04c873 --- /dev/null +++ b/models/LL3DA/eval_utils/evaluate_dialogue.py @@ -0,0 +1,185 @@ +import datetime +import importlib +import json +import math +import os +import sys +import time +from collections import OrderedDict, defaultdict + +import torch +import utils.capeval.bleu.bleu as capblue +import utils.capeval.cider.cider as capcider +import utils.capeval.meteor.meteor as capmeteor +import utils.capeval.rouge.rouge as caprouge +from utils.ap_calculator import APCalculator +from utils.box_util import box3d_iou_batch_tensor +from utils.dist import (all_gather_dict, all_reduce_average, barrier, get_rank, + init_distributed, is_distributed, is_primary) +from utils.io import save_checkpoint +from utils.misc import SmoothedValue +from utils.proposal_parser import parse_predictions + + +def score_captions(corpus: dict, candidates: dict): + + bleu = capblue.Bleu(4).compute_score(corpus, 
candidates) + cider = capcider.Cider().compute_score(corpus, candidates) + rouge = caprouge.Rouge().compute_score(corpus, candidates) + meteor = capmeteor.Meteor().compute_score(corpus, candidates) + + score_per_caption = { + 'bleu-1': [float(s) for s in bleu[1][0]], + 'bleu-2': [float(s) for s in bleu[1][1]], + 'bleu-3': [float(s) for s in bleu[1][2]], + 'bleu-4': [float(s) for s in bleu[1][3]], + 'cider': [float(s) for s in cider[1]], + 'rouge': [float(s) for s in rouge[1]], + 'meteor': [float(s) for s in meteor[1]], + } + + message = '\n'.join([ + '[BLEU-1] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][0], max(bleu[1][0]), min(bleu[1][0])), + '[BLEU-2] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][1], max(bleu[1][1]), min(bleu[1][1])), + '[BLEU-3] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][2], max(bleu[1][2]), min(bleu[1][2])), + '[BLEU-4] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][3], max(bleu[1][3]), min(bleu[1][3])), + '[CIDEr] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + cider[0], max(cider[1]), min(cider[1])), + '[ROUGE-L] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + rouge[0], max(rouge[1]), min(rouge[1])), + '[METEOR] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + meteor[0], max(meteor[1]), min(meteor[1])) + ]) + + eval_metric = { + 'BLEU-4': bleu[0][3], + 'CiDEr': cider[0], + 'Rouge': rouge[0], + 'METEOR': meteor[0], + } + return score_per_caption, message, eval_metric + + +@torch.no_grad() +def evaluate( + args, + curr_epoch, + model, + dataset_config, + dataset_loader, + logout=print, + curr_train_iter=-1, +): + + # prepare ground truth caption labels + print('preparing corpus...') + + annotations = dataset_loader.dataset.annotations + task_name = dataset_loader.dataset.task_name + corpus = { + '-'.join((anno['scene_id'], anno['question'])): anno['answers'] \ + for anno in annotations + } + candidates = {} + ### initialize and prepare for evaluation + tokenizer = dataset_loader.dataset.tokenizer + net_device = next(model.parameters()).device + num_batches = len(dataset_loader) + + time_delta = SmoothedValue(window_size=10) + + model.eval() + barrier() + + epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else '' + + for curr_iter, batch_data_label in enumerate(dataset_loader): + + curr_time = time.time() + for key in batch_data_label: + batch_data_label[key] = batch_data_label[key].to(net_device) + + model_input = { + 'point_clouds': batch_data_label['point_clouds'], + 'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'], + 'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'], + 'qformer_input_ids': batch_data_label['qformer_input_ids'], + 'qformer_attention_mask': + batch_data_label['qformer_attention_mask'], + 'instruction': batch_data_label['instruction'], + 'instruction_mask': batch_data_label['instruction_mask'], + } + outputs = model(model_input, is_eval=True, task_name='chat') + + outputs = dict(output_ids=outputs['output_ids'], ) + + outputs = all_gather_dict(outputs) + batch_data_label = all_gather_dict(batch_data_label) + + output_ids = outputs['output_ids'] # batch x max_length + answers = tokenizer.batch_decode(output_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False) + + sample_index = batch_data_label['scan_idx'].cpu().tolist() + + for idx in range(output_ids.shape[0]): + anno = annotations[sample_index[idx]] + key = '-'.join((anno['scene_id'], anno['question'])) + answer = answers[idx] + answer = ' '.join(filter(lambda w: w, answer.split(' 
'))) + candidates[key] = [answer] + + # Memory intensive as it gathers point cloud GT tensor across all ranks + time_delta.update(time.time() - curr_time) + + if is_primary() and curr_iter % args.log_every == 0: + mem_mb = torch.cuda.max_memory_allocated() / (1024**2) + logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; ' + f'Evaluating on iter: {curr_train_iter}; ' + f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB') + barrier() + + # end of forward pass traversion + score_per_caption, message, eval_metric = score_captions( + OrderedDict([(key, corpus[key]) for key in candidates]), candidates) + + if is_primary(): + logout('\n----------------------Evaluation-----------------------\n') + logout(message) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_corpus_val.json'), 'w') as f: + json.dump(corpus, f, indent=4) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_pred_val.json'), 'w') as f: + json.dump(candidates, f, indent=4) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_pred_gt_val.json'), 'w') as f: + pred_gt_val = {} + for scene_object_id, scene_object_id_key in enumerate(candidates): + pred_gt_val[scene_object_id_key] = { + 'pred': candidates[scene_object_id_key], + 'gt': corpus[scene_object_id_key], + 'score': { + 'bleu-1': score_per_caption['bleu-1'][scene_object_id], + 'bleu-2': score_per_caption['bleu-2'][scene_object_id], + 'bleu-3': score_per_caption['bleu-3'][scene_object_id], + 'bleu-4': score_per_caption['bleu-4'][scene_object_id], + 'CiDEr': score_per_caption['cider'][scene_object_id], + 'rouge': score_per_caption['rouge'][scene_object_id], + 'meteor': score_per_caption['meteor'][scene_object_id] + } + } + json.dump(pred_gt_val, f, indent=4) + + return eval_metric diff --git a/models/LL3DA/eval_utils/evaluate_embodied.py b/models/LL3DA/eval_utils/evaluate_embodied.py new file mode 100644 index 0000000..03bf0af --- /dev/null +++ b/models/LL3DA/eval_utils/evaluate_embodied.py @@ -0,0 +1,209 @@ +import datetime +import importlib +import json +import math +import os +import sys +import time +from collections import OrderedDict, defaultdict + +import torch +import utils.capeval.bleu.bleu as capblue +import utils.capeval.cider.cider as capcider +import utils.capeval.meteor.meteor as capmeteor +import utils.capeval.rouge.rouge as caprouge +from IPython import embed +from utils.ap_calculator import APCalculator +from utils.box_util import box3d_iou_batch_tensor +from utils.dist import (all_gather_dict, all_reduce_average, barrier, get_rank, + init_distributed, is_distributed, is_primary) +from utils.io import save_checkpoint +from utils.misc import SmoothedValue +from utils.proposal_parser import parse_predictions + + +def score_captions(corpus: dict, candidates: dict): + + bleu = capblue.Bleu(4).compute_score(corpus, candidates) + cider = capcider.Cider().compute_score(corpus, candidates) + rouge = caprouge.Rouge().compute_score(corpus, candidates) + meteor = capmeteor.Meteor().compute_score(corpus, candidates) + + score_per_caption = { + 'bleu-1': [float(s) for s in bleu[1][0]], + 'bleu-2': [float(s) for s in bleu[1][1]], + 'bleu-3': [float(s) for s in bleu[1][2]], + 'bleu-4': [float(s) for s in bleu[1][3]], + 'cider': [float(s) for s in cider[1]], + 'rouge': [float(s) for s in rouge[1]], + 'meteor': [float(s) for s in meteor[1]], + } + + message = '\n'.join([ + '[BLEU-1] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][0], max(bleu[1][0]), min(bleu[1][0])), + '[BLEU-2] Mean: {:.4f}, Max: {:.4f}, 
Min: {:.4f}'.format( + bleu[0][1], max(bleu[1][1]), min(bleu[1][1])), + '[BLEU-3] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][2], max(bleu[1][2]), min(bleu[1][2])), + '[BLEU-4] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][3], max(bleu[1][3]), min(bleu[1][3])), + '[CIDEr] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + cider[0], max(cider[1]), min(cider[1])), + '[ROUGE-L] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + rouge[0], max(rouge[1]), min(rouge[1])), + '[METEOR] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + meteor[0], max(meteor[1]), min(meteor[1])) + ]) + + eval_metric = { + 'BLEU-4': bleu[0][3], + 'CiDEr': cider[0], + 'Rouge': rouge[0], + 'METEOR': meteor[0], + } + return score_per_caption, message, eval_metric + + +@torch.no_grad() +def evaluate( + args, + curr_epoch, + model, + dataset_config, + dataset_loader, + logout=print, + curr_train_iter=-1, +): + + # prepare ground truth caption labels + print('preparing corpus...') + + annotations = dataset_loader.dataset.annotations + + corpus = { + '-'.join((anno['ID'], anno['question'])): anno['answers'] if 'answers' in anno else anno['caption'] \ + for anno in annotations + } + candidates = {} + ### initialize and prepare for evaluation + tokenizer = dataset_loader.dataset.tokenizer + net_device = next(model.parameters()).device + num_batches = len(dataset_loader) + + time_delta = SmoothedValue(window_size=10) + + model.eval() + barrier() + + epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else '' + + cnt = 0 + for curr_iter, batch_data_label in enumerate(dataset_loader): + + curr_time = time.time() + for key in batch_data_label: + batch_data_label[key] = batch_data_label[key].to(net_device) + + model_input = { + 'point_clouds': batch_data_label['point_clouds'], + 'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'], + 'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'], + 'qformer_input_ids': batch_data_label['qformer_input_ids'], + 'qformer_attention_mask': + batch_data_label['qformer_attention_mask'], + 'instruction': batch_data_label['instruction'], + 'instruction_mask': batch_data_label['instruction_mask'], + } + + outputs = model(model_input, is_eval=True, task_name='qa') + + outputs = dict(output_ids=outputs['output_ids'], ) + + outputs = all_gather_dict(outputs) + batch_data_label = all_gather_dict(batch_data_label) + + output_ids = outputs['output_ids'] # batch x max_length + answers = tokenizer.batch_decode(output_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False) + + sample_index = batch_data_label['scan_idx'].cpu().tolist() + + for idx in range(output_ids.shape[0]): + anno = annotations[sample_index[idx]] + key = '-'.join((anno['ID'], anno['question'])) + answer = answers[idx] + answer = ' '.join(filter(lambda w: w, answer.split(' '))) + print(answer) + candidates[key] = [answer] + + # Memory intensive as it gathers point cloud GT tensor across all ranks + time_delta.update(time.time() - curr_time) + + if is_primary() and curr_iter % args.log_every == 0: + mem_mb = torch.cuda.max_memory_allocated() / (1024**2) + logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; ' + f'Evaluating on iter: {curr_train_iter}; ' + f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB') + if curr_iter % 200 == 0: + with open( + os.path.join( + args.checkpoint_dir, + f'embodiedscan_L_pred_gt_val_{curr_iter}.json'), + 'w') as f: + pred_gt_val = {} + for scene_object_id, scene_object_id_key in enumerate( + candidates): + 
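                        # Editor's note (annotation, not part of the patch): each
                        # intermediate record pairs the decoded answer with its
                        # ground-truth list so partial predictions can be inspected
                        # while the epoch is still running; 'gt' is wrapped in a list
                        # here because embodied annotations may store a single caption
                        # string (the corpus is normalized to lists after the
                        # forward-pass loop, before scoring).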
pred_gt_val[scene_object_id_key] = { + 'pred': candidates[scene_object_id_key], + 'gt': [corpus[scene_object_id_key]], + } + json.dump(pred_gt_val, f, indent=4) + barrier() + + # fix for the caption + + for key in corpus.keys(): + if isinstance(corpus[key], str): + corpus[key] = [corpus[key]] + + # end of forward pass traversion + score_per_caption, message, eval_metric = score_captions( + OrderedDict([(key, corpus[key]) for key in candidates]), candidates) + + if is_primary(): + logout('\n----------------------Evaluation-----------------------\n') + logout(message) + + with open( + os.path.join(args.checkpoint_dir, + 'embodiedscan_L_corpus_val.json'), 'w') as f: + json.dump(corpus, f, indent=4) + + with open( + os.path.join(args.checkpoint_dir, + 'embodiedscan_L_pred_val.json'), 'w') as f: + json.dump(candidates, f, indent=4) + + with open( + os.path.join(args.checkpoint_dir, + 'embodiedscan_L_pred_gt_val.json'), 'w') as f: + pred_gt_val = {} + for scene_object_id, scene_object_id_key in enumerate(candidates): + pred_gt_val[scene_object_id_key] = { + 'pred': candidates[scene_object_id_key], + 'gt': corpus[scene_object_id_key], + 'score': { + 'bleu-1': score_per_caption['bleu-1'][scene_object_id], + 'bleu-2': score_per_caption['bleu-2'][scene_object_id], + 'bleu-3': score_per_caption['bleu-3'][scene_object_id], + 'bleu-4': score_per_caption['bleu-4'][scene_object_id], + 'CiDEr': score_per_caption['cider'][scene_object_id], + 'rouge': score_per_caption['rouge'][scene_object_id], + 'meteor': score_per_caption['meteor'][scene_object_id] + } + } + json.dump(pred_gt_val, f, indent=4) + + return eval_metric diff --git a/models/LL3DA/eval_utils/evaluate_gpt.py b/models/LL3DA/eval_utils/evaluate_gpt.py new file mode 100644 index 0000000..f5e8365 --- /dev/null +++ b/models/LL3DA/eval_utils/evaluate_gpt.py @@ -0,0 +1,37 @@ +import json +from argparse import ArgumentParser + +from mmscan import GPTEvaluator + + +def parse_form(results): + """Parse the format of output to comform with mmscan format.""" + item_list = [] + for id_with_Q in results: + item_ = {} + item_['ID'] = id_with_Q.split('@')[0] + item_['question'] = results[id_with_Q]['instruction'] + item_['pred'] = [results[id_with_Q]['pred']] + item_['gt'] = results[id_with_Q]['gt'] + item_list.append(item_) + return item_list + + +if __name__ == '__main__': + parser = ArgumentParser() + parser.add_argument('--file', type=str, required=True) + parser.add_argument('--tmp_path', type=str, required=True) + parser.add_argument('--api_key', type=str, required=True) + parser.add_argument('--eval_size', type=int, default=-1) + parser.add_argument('--nproc', type=int, default=8) + args = parser.parse_args() + + ll3da_file_path = args.file + + evaluator = GPTEvaluator(eval_size =args.eval_size,\ + api_key=args.api_key) + + with open(ll3da_file_path, 'r') as f: + results = json.load(f) + print(evaluator.load_and_eval(parse_form(results),num_threads=args.nproc,\ + tmp_path =args.tmp_path)) diff --git a/models/LL3DA/eval_utils/evaluate_mmscan.py b/models/LL3DA/eval_utils/evaluate_mmscan.py new file mode 100644 index 0000000..e05bba6 --- /dev/null +++ b/models/LL3DA/eval_utils/evaluate_mmscan.py @@ -0,0 +1,198 @@ +import datetime +import importlib +import json +import math +import os +import sys +import time +from collections import OrderedDict, defaultdict + +import torch +from utils.ap_calculator import APCalculator +from utils.box_util import box3d_iou_batch_tensor +from utils.dist import (all_gather_dict, all_reduce_average, barrier, get_rank, + 
init_distributed, is_distributed, is_primary) +from utils.io import save_checkpoint +from utils.misc import SmoothedValue +from utils.proposal_parser import parse_predictions + +from mmscan import QuestionAnsweringEvaluator + +model_config = { + 'simcse': '/mnt/petrelfs/linjingli/mmscan_modelzoo-main/evaluation/pc', + 'sbert': '/mnt/petrelfs/linjingli/mmscan_modelzoo-main/evaluation/st' +} + + +def to_mmscan_form(raw_input): + _input = {} + _input['ID'] = raw_input['ID'].split('@')[0] + _input['question'] = raw_input['ID'].split('@')[1] + _input['pred'] = raw_input['answer_pred'] + _input['gt'] = raw_input['answer_gt'] + + return _input + + +@torch.no_grad() +def evaluate( + args, + curr_epoch, + model, + dataset_config, + dataset_loader, + logout=print, + curr_train_iter=-1, +): + + # prepare ground truth caption labels + print('preparing corpus...') + + evaluator = QuestionAnsweringEvaluator(model_config) + + annotations = dataset_loader.dataset.annotations + + + + corpus = { + '@'.join((anno['ID'], anno['question'])): anno['answers'] if 'answers' in anno else anno['caption'] \ + for anno in annotations + } + candidates = {} + ### initialize and prepare for evaluation + tokenizer = dataset_loader.dataset.tokenizer + net_device = next(model.parameters()).device + num_batches = len(dataset_loader) + + time_delta = SmoothedValue(window_size=10) + + model.eval() + barrier() + + epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else '' + + for curr_iter, batch_data_label in enumerate(dataset_loader): + + curr_time = time.time() + for key in batch_data_label: + try: + batch_data_label[key] = batch_data_label[key].to(net_device) + except: + continue + model_input = { + 'point_clouds': batch_data_label['point_clouds'], + 'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'], + 'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'], + 'qformer_input_ids': batch_data_label['qformer_input_ids'], + 'qformer_attention_mask': + batch_data_label['qformer_attention_mask'], + 'instruction': batch_data_label['instruction'], + 'instruction_mask': batch_data_label['instruction_mask'], + } + outputs = model(model_input, is_eval=True, task_name='qa') + + outputs = dict(output_ids=outputs['output_ids'], ) + + outputs = all_gather_dict(outputs) + + batch_data_label = all_gather_dict(batch_data_label) + + output_ids = outputs['output_ids'] # batch x max_length + answers = tokenizer.batch_decode(output_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False) + + sample_index = batch_data_label['scan_idx'].cpu().tolist() + + # rewrite the batch_result is ok + batch_results = [] + for idx in range(output_ids.shape[0]): + raw_input_dict = {} + + anno = annotations[sample_index[idx]] + key = '@'.join((anno['ID'], anno['question'])) + + # for the multi-gpu evaluation, we need to make sure that the same question is not evaluated multiple times + # This is caused by the distributed sampler, last several samples may be duplicated + if key in candidates: + continue + + answer = answers[idx] + answer = ' '.join(filter(lambda w: w, answer.split(' '))) + candidates[key] = answer + + raw_input_dict['ID'] = key + raw_input_dict['answer_pred'] = [answer] + raw_input_dict['answer_gt'] = corpus[key] + batch_results.append(to_mmscan_form(raw_input_dict)) + + evaluator.update(batch_results) + + # Memory intensive as it gathers point cloud GT tensor across all ranks + time_delta.update(time.time() - curr_time) + + if is_primary() and curr_iter % args.log_every == 0: + mem_mb = 
torch.cuda.max_memory_allocated() / (1024**2) + logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; ' + f'Evaluating on iter: {curr_train_iter}; ' + f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB') + if curr_iter % 200 == 0: + with open( + os.path.join(args.checkpoint_dir, + f'qa_pred_gt_val_{curr_iter}.json'), + 'w') as f: + pred_gt_val = {} + for index_, scene_object_id_key in enumerate(candidates): + pred_gt_val[scene_object_id_key] = { + 'instruction': scene_object_id_key.split('@')[1], + 'pred': candidates[scene_object_id_key], + 'gt': corpus[scene_object_id_key], + } + json.dump(pred_gt_val, f, indent=4) + print(f'save pred_gt_val {curr_iter}') + barrier() + + if is_primary(): + logout('\n----------------------Evaluation-----------------------\n') + + with open(os.path.join(args.checkpoint_dir, 'corpus_val.json'), + 'w') as f: + json.dump(corpus, f, indent=4) + + with open(os.path.join(args.checkpoint_dir, 'pred_val.json'), + 'w') as f: + json.dump(candidates, f, indent=4) + + with open(os.path.join(args.checkpoint_dir, 'qa_pred_gt_val.json'), + 'w') as f: + pred_gt_val = {} + for index_, scene_object_id_key in enumerate(candidates): + pred_gt_val[scene_object_id_key] = { + 'instruction': scene_object_id_key.split('@')[1], + 'pred': candidates[scene_object_id_key], + 'gt': corpus[scene_object_id_key], + } + + json.dump(pred_gt_val, f, indent=4) + # end of forward pass traversion + metric_results = evaluator.start_evaluation() + results_record = evaluator.records + + if is_primary(): + + with open( + os.path.join(args.checkpoint_dir, + 'qa_pred_gt_val_with_scores.json'), 'w') as f: + pred_gt_val = {} + for index_, scene_object_id_key in enumerate(candidates): + pred_gt_val[scene_object_id_key] = { + 'instruction': scene_object_id_key.split('@')[1], + 'pred': candidates[scene_object_id_key], + 'gt': corpus[scene_object_id_key], + } + pred_gt_val[scene_object_id_key].update(results_record[index_]) + json.dump(pred_gt_val, f, indent=4) + json.dump(metric_results, f, indent=4) + + evaluator.reset() + return metric_results diff --git a/models/LL3DA/eval_utils/evaluate_ovdet.py b/models/LL3DA/eval_utils/evaluate_ovdet.py new file mode 100644 index 0000000..0ab967b --- /dev/null +++ b/models/LL3DA/eval_utils/evaluate_ovdet.py @@ -0,0 +1,356 @@ +import json +import os +import re +import time +from collections import OrderedDict, defaultdict + +import numpy as np +import torch +import utils.capeval.bleu.bleu as capblue +import utils.capeval.cider.cider as capcider +import utils.capeval.meteor.meteor as capmeteor +import utils.capeval.rouge.rouge as caprouge +from utils.box_util import box3d_iou_batch_tensor +from utils.dist import all_gather_dict, barrier, is_primary +from utils.misc import SmoothedValue +from utils.proposal_parser import parse_predictions + + +def score_captions(corpus: dict, candidates: dict): + + bleu = capblue.Bleu(4).compute_score(corpus, candidates) + cider = capcider.Cider().compute_score(corpus, candidates) + rouge = caprouge.Rouge().compute_score(corpus, candidates) + meteor = capmeteor.Meteor().compute_score(corpus, candidates) + + score_per_caption = { + 'bleu-1': [float(s) for s in bleu[1][0]], + 'bleu-2': [float(s) for s in bleu[1][1]], + 'bleu-3': [float(s) for s in bleu[1][2]], + 'bleu-4': [float(s) for s in bleu[1][3]], + 'cider': [float(s) for s in cider[1]], + 'rouge': [float(s) for s in rouge[1]], + 'meteor': [float(s) for s in meteor[1]], + } + + message = '\n'.join([ + '[BLEU-1] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + 
bleu[0][0], max(bleu[1][0]), min(bleu[1][0])), + '[BLEU-2] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][1], max(bleu[1][1]), min(bleu[1][1])), + '[BLEU-3] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][2], max(bleu[1][2]), min(bleu[1][2])), + '[BLEU-4] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][3], max(bleu[1][3]), min(bleu[1][3])), + '[CIDEr] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + cider[0], max(cider[1]), min(cider[1])), + '[ROUGE-L] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + rouge[0], max(rouge[1]), min(rouge[1])), + '[METEOR] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + meteor[0], max(meteor[1]), min(meteor[1])) + ]) + + eval_metric = { + 'BLEU-4': bleu[0][3], + 'CiDEr': cider[0], + 'Rouge': rouge[0], + 'METEOR': meteor[0], + } + return score_per_caption, message, eval_metric + + +def prepare_corpus(raw_data, max_len: int = 30) -> dict: + # helper function to prepare ground truth captions + corpus = defaultdict(list) + object_id_to_name = defaultdict(lambda: 'unknown') + + for data in raw_data: + + (scene_id, object_id, object_name + ) = data['scene_id'], data['object_id'], data['object_name'] + + # parse language tokens + token = data['token'][:max_len] + description = ' '.join(['sos'] + token + ['eos']) + key = f'{scene_id}|{object_id}|{object_name}' + object_id_to_name[f'{scene_id}|{object_id}'] = object_name + + corpus[key].append(description) + + return corpus, object_id_to_name + + +@torch.no_grad() +def evaluate( + args, + curr_epoch, + model, + dataset_config, + dataset_loader, + logout=print, + curr_train_iter=-1, +): + + # prepare ground truth caption labels + print('preparing corpus...') + scene_list = dataset_loader.dataset.scan_names + task_name = dataset_loader.dataset.task_name + corpus, object_id_to_name = prepare_corpus( + dataset_loader.dataset.scanrefer) + + ### initialize and prepare for evaluation + tokenizer = dataset_loader.dataset.tokenizer + net_device = next(model.parameters()).device + num_batches = len(dataset_loader) + + time_delta = SmoothedValue(window_size=10) + + model.eval() + barrier() + + epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else '' + + candidates = {'caption': OrderedDict({}), 'iou': defaultdict(float)} + + for curr_iter, batch_data_label in enumerate(dataset_loader): + + curr_time = time.time() + for key in batch_data_label: + batch_data_label[key] = batch_data_label[key].to(net_device) + + model_input = { + 'point_clouds': batch_data_label['point_clouds'], + 'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'], + 'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'], + 'instruction': batch_data_label['instruction'], + 'instruction_mask': batch_data_label['instruction_mask'], + 'qformer_input_ids': batch_data_label['qformer_input_ids'], + 'qformer_attention_mask': + batch_data_label['qformer_attention_mask'], + } + outputs = model(model_input, is_eval=True, task_name='ov-det') + + outputs = dict( + box_corners=outputs['box_corners'], + sem_cls_prob=outputs['sem_cls_prob'], + objectness_prob=outputs['objectness_prob'], + output_ids=outputs['output_ids'], + sem_cls_logits=outputs['sem_cls_logits'], + ) + + outputs = all_gather_dict(outputs) + batch_data_label = all_gather_dict(batch_data_label) + + ### decoding captions for bounding boxes + batch_size, MAX_NUM_OBJ, _, _ = batch_data_label[ + 'gt_box_corners'].shape + _, nqueries, _, _ = outputs['box_corners'].shape + + output_ids = outputs['output_ids'] # batch x nqueries x max_length + 
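        # Editor's sketch (annotation, not part of the patch): batch_decode below
        # consumes the id tensor flattened to batch * nqueries sequences, so the
        # caption for query q of sample b sits at flat index b * nqueries + q,
        # matching the list comprehension used to regroup captions afterwards.
        # A minimal, self-contained illustration of that regrouping:
        #
        #     nqueries = 3
        #     flat = [f'cap-{b}-{q}' for b in range(2) for q in range(nqueries)]
        #     grouped = [flat[b * nqueries:(b + 1) * nqueries] for b in range(2)]
        #     assert grouped[1][2] == 'cap-1-2'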
captions = tokenizer.batch_decode(output_ids.reshape( + -1, output_ids.shape[-1]), + skip_special_tokens=True, + clean_up_tokenization_spaces=False) + + ### replace box corners + caption_box_coords = np.zeros((len(captions), 7)) + caption_box_mask = np.zeros((len(captions), )) + + for cap_id, caption in enumerate(captions): + try: + # try to decode the caption into 3D boxes + coord_str = re.findall(r'(.*?)', caption)[0] + x, y, z, w, h, l = map(float, coord_str.split(',')) + caption_box_coords[cap_id, :6] = x, y, z, w, h, l + caption_box_mask[cap_id] = 1 + except: + continue + + point_cloud_dims_min = batch_data_label['point_cloud_dims_min'] + point_cloud_dims_max = batch_data_label['point_cloud_dims_max'] + + caption_box_coords = caption_box_coords.reshape( + batch_size, nqueries, 7) + caption_box_coords = torch.from_numpy(caption_box_coords).to( + net_device) + caption_box_mask = caption_box_mask.reshape(batch_size, nqueries) + caption_box_mask = torch.from_numpy(caption_box_mask).to(net_device) + + # batch x nqueries x 7 + caption_box_coords = caption_box_coords / args.grid_size_3d + caption_box_center = caption_box_coords[..., :3] + caption_box_size = caption_box_coords[..., 3:6] + + scene_scale = (point_cloud_dims_max - point_cloud_dims_min).reshape( + batch_size, 1, 3) + scene_floor = point_cloud_dims_min.reshape(batch_size, 1, 3) + caption_box_center = caption_box_center * scene_scale + scene_floor + caption_box_size = caption_box_size * scene_scale + caption_box_angle = caption_box_coords[..., -1].reshape( + batch_size, nqueries) + + # batch x nqueries x 8 x 3 + caption_box_corners = dataset_config.box_parametrization_to_corners( + caption_box_center, # batch x + caption_box_size, + caption_box_angle) + box_corners = torch.zeros_like(outputs['box_corners']) + box_corners[caption_box_mask == 1] = caption_box_corners[ + caption_box_mask == 1] + outputs['box_corners'] = box_corners + + # reshape caption + post_process = lambda cap: ('sos ' + cap + ' eos').replace(' ', ' ') + captions = [ + [ + post_process(captions[bid * nqueries + pid]) \ + for pid in range(nqueries) + ] \ + for bid in range(batch_size) + ] + + # ---- add nms to get accurate predictions + good_bbox_masks = outputs['sem_cls_logits'].argmax(-1) != ( + outputs['sem_cls_logits'].shape[-1] - 1) + nms_bbox_masks = parse_predictions( # batch x nqueries + outputs['box_corners'], outputs['sem_cls_prob'], + outputs['objectness_prob'], batch_data_label['point_clouds']) + nms_bbox_masks = torch.from_numpy(nms_bbox_masks).long() == 1 + good_bbox_masks &= nms_bbox_masks.to(good_bbox_masks.device) + + match_box_ious = box3d_iou_batch_tensor( # batch, nqueries, MAX_NUM_OBJ + (outputs['box_corners'].unsqueeze(2).repeat( + 1, 1, MAX_NUM_OBJ, 1, 1).view(-1, 8, 3)), + (batch_data_label['gt_box_corners'].unsqueeze(1).repeat( + 1, nqueries, 1, 1, 1).view(-1, 8, 3))).view( + batch_size, nqueries, MAX_NUM_OBJ) + match_box_ious, match_box_idxs = match_box_ious.max( + -1) # batch, nqueries + match_box_idxs = torch.gather(batch_data_label['gt_object_ids'], 1, + match_box_idxs) # batch, nqueries + + # store into visualization directory for further evaluation + for batch_id, scene_id in enumerate( + batch_data_label['scan_idx'].cpu().tolist()): + scene_name = scene_list[scene_id] + scene_prediction = [] + scene_retain_results = good_bbox_masks[batch_id].tolist( + ) # nqueries + for prop_id in range(nqueries): + if scene_retain_results[prop_id] is False: + continue + if captions[batch_id][prop_id] == 'sos eos': + continue + 
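                # Editor's sketch (annotation, not part of the patch): the decoding
                # above maps grid-quantized box coordinates back to world space by
                # dividing by args.grid_size_3d and rescaling by the scene extent.
                # A tiny numeric example with hypothetical scene bounds and the
                # parser's default grid size of 255:
                #
                #     import numpy as np
                #     grid_size = 255.0
                #     pc_min = np.array([-2.0, -2.0, 0.0])
                #     pc_max = np.array([2.0, 2.0, 3.0])
                #     center_grid = np.array([127.0, 127.0, 42.0])  # from caption
                #     center = center_grid / grid_size * (pc_max - pc_min) + pc_min
                #     # -> about (0.0, 0.0, 0.5): mid-room in x/y, ~0.5 m up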
scene_prediction.append( + dict(bbox=outputs['box_corners'][batch_id, + prop_id].cpu().tolist(), + caption=captions[batch_id][prop_id], + match_iou=match_box_ious[batch_id, + prop_id].cpu().tolist())) + dump_dir = os.path.join(args.checkpoint_dir, + f'{task_name}_viz_ovdet') + os.makedirs(dump_dir, exist_ok=True) + with open(os.path.join(dump_dir, scene_name + '.json'), + 'w') as file: + json.dump(scene_prediction, file, indent=4) + + # ---- Checkout bounding box ious and semantic logits + good_bbox_masks &= match_box_ious > args.test_min_iou # batch, nqueries + good_bbox_masks = good_bbox_masks.cpu().tolist() + + match_box_idxs = match_box_idxs.cpu().tolist() + match_box_ious = match_box_ious.cpu().tolist() + + ### calculate measurable indicators on captions + for idx, scene_id in enumerate( + batch_data_label['scan_idx'].cpu().tolist()): + scene_name = scene_list[scene_id] + for prop_id in range(nqueries): + + if good_bbox_masks[idx][prop_id] is False: + continue + + match_obj_id = match_box_idxs[idx][prop_id] + match_obj_iou = match_box_ious[idx][prop_id] + + object_name = object_id_to_name[f'{scene_name}|{match_obj_id}'] + key = f'{scene_name}|{match_obj_id}|{object_name}' + + if match_obj_iou > candidates['iou'][key]: + candidates['iou'][key] = match_obj_iou + caption = 'this is matched, but somehow no captions' + if captions[idx][prop_id] != 'sos eos': + caption = captions[idx][prop_id] + candidates['caption'][key] = [caption] + # DEBUG: checkout how many matched bounding boxes + # candidates[key] = ["this is a valid match!"] + + # Memory intensive as it gathers point cloud GT tensor across all ranks + time_delta.update(time.time() - curr_time) + + if is_primary() and curr_iter % args.log_every == 0: + mem_mb = torch.cuda.max_memory_allocated() / (1024**2) + logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; ' + f'Evaluating on iter: {curr_train_iter}; ' + f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB') + barrier() + # end of forward pass traversion + + ### message out + missing_proposals = len(corpus.keys() - candidates['caption'].keys()) + total_captions = len(corpus.keys()) + + ### make up placeholders for undetected bounding boxes + for missing_key in (corpus.keys() - candidates['caption'].keys()): + candidates['caption'][missing_key] = ['sos eos'] + + # find annotated objects in scanrefer + candidates = OrderedDict([ + (key, value) for key, value in sorted(candidates['caption'].items()) \ + if not key.endswith('unknown') + ]) + score_per_caption, message, eval_metric = score_captions( + OrderedDict([(key, corpus[key]) for key in candidates]), candidates) + + if is_primary(): + logout(f'\n----------------------Evaluation-----------------------\n' + f'INFO: iou@{args.test_min_iou} matched proposals: ' + f'[{total_captions - missing_proposals} / {total_captions}], ') + logout(message) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_corpus_val.json'), 'w') as f: + json.dump(corpus, f, indent=4) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_pred_val.json'), 'w') as f: + json.dump(candidates, f, indent=4) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_pred_gt_val.json'), 'w') as f: + pred_gt_val = {} + for scene_object_id, scene_object_id_key in enumerate(candidates): + pred_gt_val[scene_object_id_key] = { + 'pred': candidates[scene_object_id_key], + 'gt': corpus[scene_object_id_key], + 'score': { + 'bleu-1': score_per_caption['bleu-1'][scene_object_id], + 'bleu-2': 
score_per_caption['bleu-2'][scene_object_id], + 'bleu-3': score_per_caption['bleu-3'][scene_object_id], + 'bleu-4': score_per_caption['bleu-4'][scene_object_id], + 'CiDEr': score_per_caption['cider'][scene_object_id], + 'rouge': score_per_caption['rouge'][scene_object_id], + 'meteor': score_per_caption['meteor'][scene_object_id] + } + } + json.dump(pred_gt_val, f, indent=4) + + eval_metrics = { + metric + f'@{args.test_min_iou}': score \ + for metric, score in eval_metric.items() + } + eval_metrics['CiDEr@0.5'] = total_captions - missing_proposals + return eval_metrics diff --git a/models/LL3DA/eval_utils/evaluate_qa.py b/models/LL3DA/eval_utils/evaluate_qa.py new file mode 100644 index 0000000..3e520d5 --- /dev/null +++ b/models/LL3DA/eval_utils/evaluate_qa.py @@ -0,0 +1,181 @@ +import datetime +import importlib +import json +import math +import os +import sys +import time +from collections import OrderedDict, defaultdict + +import torch +import utils.capeval.bleu.bleu as capblue +import utils.capeval.cider.cider as capcider +import utils.capeval.meteor.meteor as capmeteor +import utils.capeval.rouge.rouge as caprouge +from utils.ap_calculator import APCalculator +from utils.box_util import box3d_iou_batch_tensor +from utils.dist import (all_gather_dict, all_reduce_average, barrier, get_rank, + init_distributed, is_distributed, is_primary) +from utils.io import save_checkpoint +from utils.misc import SmoothedValue +from utils.proposal_parser import parse_predictions + + +def score_captions(corpus: dict, candidates: dict): + + bleu = capblue.Bleu(4).compute_score(corpus, candidates) + cider = capcider.Cider().compute_score(corpus, candidates) + rouge = caprouge.Rouge().compute_score(corpus, candidates) + meteor = capmeteor.Meteor().compute_score(corpus, candidates) + + score_per_caption = { + 'bleu-1': [float(s) for s in bleu[1][0]], + 'bleu-2': [float(s) for s in bleu[1][1]], + 'bleu-3': [float(s) for s in bleu[1][2]], + 'bleu-4': [float(s) for s in bleu[1][3]], + 'cider': [float(s) for s in cider[1]], + 'rouge': [float(s) for s in rouge[1]], + 'meteor': [float(s) for s in meteor[1]], + } + + message = '\n'.join([ + '[BLEU-1] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][0], max(bleu[1][0]), min(bleu[1][0])), + '[BLEU-2] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][1], max(bleu[1][1]), min(bleu[1][1])), + '[BLEU-3] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][2], max(bleu[1][2]), min(bleu[1][2])), + '[BLEU-4] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][3], max(bleu[1][3]), min(bleu[1][3])), + '[CIDEr] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + cider[0], max(cider[1]), min(cider[1])), + '[ROUGE-L] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + rouge[0], max(rouge[1]), min(rouge[1])), + '[METEOR] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + meteor[0], max(meteor[1]), min(meteor[1])) + ]) + + eval_metric = { + 'BLEU-4': bleu[0][3], + 'CiDEr': cider[0], + 'Rouge': rouge[0], + 'METEOR': meteor[0], + } + return score_per_caption, message, eval_metric + + +@torch.no_grad() +def evaluate( + args, + curr_epoch, + model, + dataset_config, + dataset_loader, + logout=print, + curr_train_iter=-1, +): + + # prepare ground truth caption labels + print('preparing corpus...') + + annotations = dataset_loader.dataset.annotations + corpus = { + '-'.join((anno['question_id'], anno['question'])): anno['answers'] \ + for anno in annotations + } + candidates = {} + ### initialize and prepare for evaluation + tokenizer = 
dataset_loader.dataset.tokenizer + net_device = next(model.parameters()).device + num_batches = len(dataset_loader) + + time_delta = SmoothedValue(window_size=10) + + model.eval() + barrier() + + epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else '' + + for curr_iter, batch_data_label in enumerate(dataset_loader): + + curr_time = time.time() + for key in batch_data_label: + batch_data_label[key] = batch_data_label[key].to(net_device) + + model_input = { + 'point_clouds': batch_data_label['point_clouds'], + 'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'], + 'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'], + 'qformer_input_ids': batch_data_label['qformer_input_ids'], + 'qformer_attention_mask': + batch_data_label['qformer_attention_mask'], + 'instruction': batch_data_label['instruction'], + 'instruction_mask': batch_data_label['instruction_mask'], + } + outputs = model(model_input, is_eval=True, task_name='qa') + + outputs = dict(output_ids=outputs['output_ids'], ) + + outputs = all_gather_dict(outputs) + batch_data_label = all_gather_dict(batch_data_label) + + output_ids = outputs['output_ids'] # batch x max_length + answers = tokenizer.batch_decode(output_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False) + + sample_index = batch_data_label['scan_idx'].cpu().tolist() + + for idx in range(output_ids.shape[0]): + anno = annotations[sample_index[idx]] + key = '-'.join((anno['question_id'], anno['question'])) + answer = answers[idx] + answer = ' '.join(filter(lambda w: w, answer.split(' '))) + candidates[key] = [answer] + + # Memory intensive as it gathers point cloud GT tensor across all ranks + time_delta.update(time.time() - curr_time) + + if is_primary() and curr_iter % args.log_every == 0: + mem_mb = torch.cuda.max_memory_allocated() / (1024**2) + logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; ' + f'Evaluating on iter: {curr_train_iter}; ' + f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB') + barrier() + + # end of forward pass traversion + score_per_caption, message, eval_metric = score_captions( + OrderedDict([(key, corpus[key]) for key in candidates]), candidates) + + if is_primary(): + logout('\n----------------------Evaluation-----------------------\n') + logout(message) + + with open(os.path.join(args.checkpoint_dir, 'qa_corpus_val.json'), + 'w') as f: + json.dump(corpus, f, indent=4) + + with open(os.path.join(args.checkpoint_dir, 'qa_pred_val.json'), + 'w') as f: + json.dump(candidates, f, indent=4) + + with open(os.path.join(args.checkpoint_dir, 'qa_pred_gt_val.json'), + 'w') as f: + pred_gt_val = {} + for scene_object_id, scene_object_id_key in enumerate(candidates): + pred_gt_val[scene_object_id_key] = { + 'pred': candidates[scene_object_id_key], + 'gt': corpus[scene_object_id_key], + 'score': { + 'bleu-1': score_per_caption['bleu-1'][scene_object_id], + 'bleu-2': score_per_caption['bleu-2'][scene_object_id], + 'bleu-3': score_per_caption['bleu-3'][scene_object_id], + 'bleu-4': score_per_caption['bleu-4'][scene_object_id], + 'CiDEr': score_per_caption['cider'][scene_object_id], + 'rouge': score_per_caption['rouge'][scene_object_id], + 'meteor': score_per_caption['meteor'][scene_object_id] + } + } + json.dump(pred_gt_val, f, indent=4) + + return eval_metric diff --git a/models/LL3DA/eval_utils/evaluate_scene_description.py b/models/LL3DA/eval_utils/evaluate_scene_description.py new file mode 100644 index 0000000..7a8e2fa --- /dev/null +++ 
b/models/LL3DA/eval_utils/evaluate_scene_description.py @@ -0,0 +1,188 @@ +import datetime +import importlib +import json +import math +import os +import sys +import time +from collections import OrderedDict, defaultdict + +import torch +import utils.capeval.bleu.bleu as capblue +import utils.capeval.cider.cider as capcider +import utils.capeval.meteor.meteor as capmeteor +import utils.capeval.rouge.rouge as caprouge +from utils.ap_calculator import APCalculator +from utils.box_util import box3d_iou_batch_tensor +from utils.dist import (all_gather_dict, all_reduce_average, barrier, get_rank, + init_distributed, is_distributed, is_primary) +from utils.io import save_checkpoint +from utils.misc import SmoothedValue +from utils.proposal_parser import parse_predictions + + +def score_captions(corpus: dict, candidates: dict): + + bleu = capblue.Bleu(4).compute_score(corpus, candidates) + cider = capcider.Cider().compute_score(corpus, candidates) + rouge = caprouge.Rouge().compute_score(corpus, candidates) + meteor = capmeteor.Meteor().compute_score(corpus, candidates) + + score_per_caption = { + 'bleu-1': [float(s) for s in bleu[1][0]], + 'bleu-2': [float(s) for s in bleu[1][1]], + 'bleu-3': [float(s) for s in bleu[1][2]], + 'bleu-4': [float(s) for s in bleu[1][3]], + 'cider': [float(s) for s in cider[1]], + 'rouge': [float(s) for s in rouge[1]], + 'meteor': [float(s) for s in meteor[1]], + } + + message = '\n'.join([ + '[BLEU-1] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][0], max(bleu[1][0]), min(bleu[1][0])), + '[BLEU-2] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][1], max(bleu[1][1]), min(bleu[1][1])), + '[BLEU-3] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][2], max(bleu[1][2]), min(bleu[1][2])), + '[BLEU-4] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + bleu[0][3], max(bleu[1][3]), min(bleu[1][3])), + '[CIDEr] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + cider[0], max(cider[1]), min(cider[1])), + '[ROUGE-L] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + rouge[0], max(rouge[1]), min(rouge[1])), + '[METEOR] Mean: {:.4f}, Max: {:.4f}, Min: {:.4f}'.format( + meteor[0], max(meteor[1]), min(meteor[1])) + ]) + + eval_metric = { + 'BLEU-4': bleu[0][3], + 'CiDEr': cider[0], + 'Rouge': rouge[0], + 'METEOR': meteor[0], + } + return score_per_caption, message, eval_metric + + +@torch.no_grad() +def evaluate( + args, + curr_epoch, + model, + dataset_config, + dataset_loader, + logout=print, + curr_train_iter=-1, +): + + # prepare ground truth caption labels + print('preparing corpus...') + + descriptions = dataset_loader.dataset.descriptions + scan_names = dataset_loader.dataset.scan_names + task_name = dataset_loader.dataset.task_name + corpus = {} + for anno in descriptions: + scene_id = anno['scene_id'] + corpus[scene_id] = corpus.get(scene_id, + []) + [anno['answers'][0].lower()] + + candidates = {} + ### initialize and prepare for evaluation + tokenizer = dataset_loader.dataset.tokenizer + net_device = next(model.parameters()).device + num_batches = len(dataset_loader) + + time_delta = SmoothedValue(window_size=10) + + model.eval() + barrier() + + epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else '' + + for curr_iter, batch_data_label in enumerate(dataset_loader): + + curr_time = time.time() + for key in batch_data_label: + batch_data_label[key] = batch_data_label[key].to(net_device) + + model_input = { + 'point_clouds': batch_data_label['point_clouds'], + 'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'], + 
'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'], + 'qformer_input_ids': batch_data_label['qformer_input_ids'], + 'qformer_attention_mask': + batch_data_label['qformer_attention_mask'], + 'instruction': batch_data_label['instruction'], + 'instruction_mask': batch_data_label['instruction_mask'], + } + outputs = model(model_input, is_eval=True, task_name='chat') + + outputs = dict(output_ids=outputs['output_ids'], ) + + outputs = all_gather_dict(outputs) + batch_data_label = all_gather_dict(batch_data_label) + + output_ids = outputs['output_ids'] # batch x max_length + answers = tokenizer.batch_decode(output_ids, + skip_special_tokens=True, + clean_up_tokenization_spaces=False) + + sample_index = batch_data_label['scan_idx'].cpu().tolist() + + for idx in range(output_ids.shape[0]): + scene_id = scan_names[sample_index[idx]] + key = scene_id + answer = answers[idx] + answer = ' '.join(filter(lambda w: w, answer.split(' '))) + candidates[key] = [answer] + + # Memory intensive as it gathers point cloud GT tensor across all ranks + time_delta.update(time.time() - curr_time) + + if is_primary() and curr_iter % args.log_every == 0: + mem_mb = torch.cuda.max_memory_allocated() / (1024**2) + logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; ' + f'Evaluating on iter: {curr_train_iter}; ' + f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB') + barrier() + + # end of forward pass traversion + score_per_caption, message, eval_metric = score_captions( + OrderedDict([(key, corpus[key]) for key in candidates]), candidates) + + if is_primary(): + logout('\n----------------------Evaluation-----------------------\n') + logout(message) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_corpus_val.json'), 'w') as f: + json.dump(corpus, f, indent=4) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_pred_val.json'), 'w') as f: + json.dump(candidates, f, indent=4) + + with open( + os.path.join(args.checkpoint_dir, + f'{task_name}_pred_gt_val.json'), 'w') as f: + pred_gt_val = {} + for scene_object_id, scene_object_id_key in enumerate(candidates): + pred_gt_val[scene_object_id_key] = { + 'pred': candidates[scene_object_id_key], + 'gt': corpus[scene_object_id_key], + 'score': { + 'bleu-1': score_per_caption['bleu-1'][scene_object_id], + 'bleu-2': score_per_caption['bleu-2'][scene_object_id], + 'bleu-3': score_per_caption['bleu-3'][scene_object_id], + 'bleu-4': score_per_caption['bleu-4'][scene_object_id], + 'CiDEr': score_per_caption['cider'][scene_object_id], + 'rouge': score_per_caption['rouge'][scene_object_id], + 'meteor': score_per_caption['meteor'][scene_object_id] + } + } + json.dump(pred_gt_val, f, indent=4) + + return eval_metric diff --git a/models/LL3DA/main.py b/models/LL3DA/main.py new file mode 100644 index 0000000..504a317 --- /dev/null +++ b/models/LL3DA/main.py @@ -0,0 +1,338 @@ +import argparse +import importlib +import os +from collections import OrderedDict + +import numpy as np +import torch +from datasets.scannet_base_dataset import DatasetConfig +from engine import do_train +from models.model_general import CaptionNet +from torch.multiprocessing import set_start_method +from utils.dist import (barrier, get_rank, init_distributed, is_distributed, + is_primary) +from utils.io import resume_if_possible +from utils.misc import my_worker_init_fn + + +def make_args_parser(): + parser = argparse.ArgumentParser( + 'LL3DA: Visual Interactive Instruction Tuning for Omni-3D Understanding, Reasoning, and Planning', + add_help=False) + + 
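    # Editor's sketch (annotation, not part of the patch): the sections below
    # register optimizer, model, dataset, training, testing, I/O and
    # distributed flags. A hypothetical test-only launch using only flags
    # defined in this parser (the checkpoint path and dataset module name are
    # placeholders; --vocab and --qformer_vocab show the parser defaults):
    #
    #     python main.py --test_only \
    #         --test_ckpt <path/to/checkpoint.pth> \
    #         --dataset <dataset_module_under_datasets/> \
    #         --vocab llama-hf/7B --qformer_vocab bert-base-uncased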
##### Optimizer ##### + parser.add_argument('--base_lr', default=5e-4, type=float) + parser.add_argument('--final_lr', default=1e-6, type=float) + parser.add_argument('--lr_scheduler', default='cosine', type=str) + parser.add_argument('--weight_decay', default=0.1, type=float) + parser.add_argument('--optimizer', default='AdamW', type=str) + parser.add_argument('--clip_gradient', + default=0.1, + type=float, + help='Max L2 norm of the gradient') + # DISABLE warmup learning rate during dense caption training + parser.add_argument('--warm_lr', default=1e-6, type=float) + parser.add_argument('--warm_lr_epochs', default=9, type=int) + # only ACTIVATE during dense caption training + parser.add_argument('--pretrained_params_lr', default=None, type=float) + parser.add_argument('--pretrained_weights', default=None, type=str) + + ##### Model ##### + # input based parameters + parser.add_argument('--use_color', default=False, action='store_true') + parser.add_argument('--use_normal', default=False, action='store_true') + parser.add_argument('--no_height', default=False, action='store_true') + parser.add_argument('--use_multiview', default=False, action='store_true') + + parser.add_argument('--detector', + default='detector_Vote2Cap_DETR', + help='folder of the detector') + parser.add_argument('--captioner', + default=None, + type=str, + help='folder of the captioner') + # training strategy + parser.add_argument( + '--freeze_detector', + default=False, + action='store_true', + help='freeze all parameters other than the caption head') + parser.add_argument('--freeze_llm', + default=False, + action='store_true', + help='freeze the llm for caption generation') + # caption related hyper parameters + parser.add_argument( + '--use_beam_search', + default=False, + action='store_true', + help='whether use beam search during caption generation.') + parser.add_argument('--max_des_len', + default=128, + type=int, + help='maximum length of object descriptions.') + parser.add_argument('--max_gen_len', + default=32, + type=int, + help='maximum length of object descriptions.') + + ##### Dataset ##### + parser.add_argument('--max_prompts', + default=16, + type=int, + help='number of visual interactions') + parser.add_argument('--dataset', + default='scannet', + help="dataset list split by ','") + parser.add_argument('--grid_size_3d', + default=255, + type=int, + help='grid size of the 3D scene') + parser.add_argument('--vocab', + default='llama-hf/7B', + type=str, + help='The LLM backend') + parser.add_argument('--qformer_vocab', + default='bert-base-uncased', + type=str, + help='The QFormer backend') + + parser.add_argument('--dataset_num_workers', default=4, type=int) + parser.add_argument('--batchsize_per_gpu', default=8, type=int) + + ##### Training ##### + parser.add_argument('--start_epoch', default=-1, type=int) + parser.add_argument('--max_epoch', default=1080, type=int) + parser.add_argument('--start_eval_after', default=-1, type=int) + parser.add_argument('--eval_every_iteration', default=4000, type=int) + parser.add_argument('--seed', default=0, type=int) + + ##### Testing ##### + parser.add_argument('--test_only', default=False, action='store_true') + parser.add_argument( + '--test_min_iou', + default=0.50, + type=float, + help='minimum iou for evaluating dense caption performance') + parser.add_argument('--criterion', + default='CiDEr', + type=str, + help='metrics for saving the best model') + parser.add_argument('--test_ckpt', default='', type=str) + + ##### I/O ##### + 
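    # Editor's note (annotation, not part of the patch): --checkpoint_dir below
    # also serves as the output root for the JSON corpus/prediction dumps
    # written by the eval_utils/* evaluators; --save_every and
    # --eval_every_iteration appear to be counted in training iterations, and
    # --log_every throttles the per-batch timing/memory logs during evaluation.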
parser.add_argument('--checkpoint_dir', default=None, type=str) + parser.add_argument('--save_every', default=4000, type=int) + parser.add_argument('--log_every', default=10, type=int) + parser.add_argument('--filter_name', + default='captioner.transformer.', + type=str) + + ##### Distributed ##### + parser.add_argument('--ngpus', default=1, type=int, help='number of gpus') + parser.add_argument('--dist_url', + default='tcp://localhost:12345', + type=str) + + args = parser.parse_args() + args.use_height = not args.no_height + + return args + + +def build_dataloader_func(args, dataset, split): + if is_distributed(): + sampler = torch.utils.data.DistributedSampler( + dataset, shuffle=(split == 'train')) + + print('Sample different samples for each GPUs.') + else: + if split == 'train': + sampler = torch.utils.data.RandomSampler(dataset) + else: + sampler = torch.utils.data.SequentialSampler(dataset) + dataloader = torch.utils.data.DataLoader( + dataset, + sampler=sampler, + batch_size=args.batchsize_per_gpu, + num_workers=args.dataset_num_workers, + worker_init_fn=my_worker_init_fn, + ) + return sampler, dataloader + + +def build_dataset(args): + + dataset_config = DatasetConfig() + datasets = {'train': None, 'test': []} + + train_datasets = [] + for dataset in args.dataset.split(','): + dataset_module = importlib.import_module(f'datasets.{dataset}') + train_datasets.append( + dataset_module.Dataset(args, + dataset_config, + split_set='train', + use_color=args.use_color, + use_normal=args.use_normal, + use_multiview=args.use_multiview, + use_height=args.use_height, + augment=True)) + datasets['test'].append( + dataset_module.Dataset(args, + dataset_config, + split_set='val', + use_color=args.use_color, + use_normal=args.use_normal, + use_multiview=args.use_multiview, + use_height=args.use_height, + augment=False)) + datasets['train'] = torch.utils.data.ConcatDataset(train_datasets) + + train_sampler, train_loader = build_dataloader_func(args, + datasets['train'], + split='train') + dataloaders = { + 'train': train_loader, + 'test': [], + 'train_sampler': train_sampler, + } + for dataset in datasets['test']: + _, test_loader = build_dataloader_func(args, dataset, split='test') + dataloaders['test'].append(test_loader) + + return dataset_config, datasets, dataloaders + + +def main(local_rank, args): + + if args.ngpus > 1: + init_distributed( + local_rank, + global_rank=local_rank, + world_size=args.ngpus, + dist_url=args.dist_url, + dist_backend='nccl', + ) + + torch.cuda.set_device(local_rank) + np.random.seed(args.seed) + torch.cuda.manual_seed_all(args.seed + get_rank()) + + if args.checkpoint_dir is not None: + pass + elif args.test_ckpt is not None: + args.checkpoint_dir = os.path.dirname(args.test_ckpt) + print(f'testing directory: {args.checkpoint_dir}') + else: + raise AssertionError( + 'Either checkpoint_dir or test_ckpt should be presented!') + os.makedirs(args.checkpoint_dir, exist_ok=True) + + ### build datasets and dataloaders + dataset_config, datasets, dataloaders = build_dataset(args) + model = CaptionNet(args, dataset_config, datasets['train']) + + # testing phase + if args.test_only: + + try: + checkpoint = torch.load(args.test_ckpt, + map_location=torch.device('cpu')) + model.load_state_dict(checkpoint['model'], strict=False) + except: + print('test the model from scratch...') + + model_no_ddp = model.cuda() + model = model.cuda(local_rank) + + if is_distributed(): + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = 
torch.nn.parallel.DistributedDataParallel( + model, device_ids=[local_rank]) + + for test_loader in dataloaders['test']: + test_loader.dataset.eval_func(args, -1, model, dataset_config, + test_loader) + + # training phase + else: + + assert (args.checkpoint_dir is not None + ), 'Please specify a checkpoint dir using --checkpoint_dir' + os.makedirs(args.checkpoint_dir, exist_ok=True) + + ### whether or not use pretrained weights + if args.pretrained_weights is not None: + checkpoint = torch.load(args.pretrained_weights, + map_location='cpu') + model.load_state_dict(checkpoint['model'], strict=False) + + print('==== ====') + print('==== loading following pre-trained parameters ====') + print('==== ====') + for name, param in checkpoint['model'].items(): + print('\t', name, param.shape) + + model_no_ddp = model.cuda() + model = model.cuda(local_rank) + + if is_distributed(): + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[local_rank]) + + if args.optimizer == 'AdamW': + optimizer = torch.optim.AdamW(filter( + lambda params: params.requires_grad, + model_no_ddp.parameters()), + lr=args.base_lr, + weight_decay=args.weight_decay) + elif args.optimizer == 'SGD': + optimizer = torch.optim.SGD(filter( + lambda params: params.requires_grad, + model_no_ddp.parameters()), + lr=args.base_lr, + weight_decay=args.weight_decay) + else: + raise NotImplementedError + + print('==== ====') + print('==== Only training the following parameters ====') + print('==== ====') + for name, param in model_no_ddp.named_parameters(): + if param.requires_grad is True: + print('\t', name, param.shape) + + loaded_epoch, best_val_metrics = resume_if_possible( + args.checkpoint_dir, model_no_ddp, optimizer) + args.start_epoch = loaded_epoch + 1 + do_train( + args, + model, + model_no_ddp, + optimizer, + dataset_config, + dataloaders, + best_val_metrics, + ) + + +def launch_distributed(args): + world_size = args.ngpus + if world_size == 1: + main(local_rank=0, args=args) + else: + torch.multiprocessing.spawn(main, nprocs=world_size, args=(args, )) + + +if __name__ == '__main__': + args = make_args_parser() + + os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' + + try: + set_start_method('spawn') + except RuntimeError: + pass + launch_distributed(args) diff --git a/models/LL3DA/models/detector_Vote2Cap_DETR/config.py b/models/LL3DA/models/detector_Vote2Cap_DETR/config.py new file mode 100644 index 0000000..6cb3233 --- /dev/null +++ b/models/LL3DA/models/detector_Vote2Cap_DETR/config.py @@ -0,0 +1,58 @@ +import os + +import numpy as np +from datasets.scannet import BASE + + +class model_config: + + def __init__(self, args, dataset_config): + + self.dataset_config = dataset_config + self.num_class = dataset_config.num_semcls + + # preencoder: Set Abstraction Layer + self.in_channel = ( + 3 * (int(args.use_color) + int(args.use_normal)) + \ + 1 * int(args.use_height) + \ + 128 * int(args.use_multiview) + ) + self.preenc_npoints = 2048 + + # position embedding + self.pos_embed = 'fourier' + + # encoder + self.enc_type = 'masked' + self.enc_nlayers = 3 + self.enc_dim = 256 + self.enc_ffn_dim = 128 + self.enc_dropout = 0.1 + self.enc_nhead = 4 + self.enc_activation = 'relu' + + # decoder + self.nqueries = 256 + self.dec_nlayers = 8 + self.dec_dim = 256 + self.dec_ffn_dim = 256 + self.dec_dropout = 0.1 + self.dec_nhead = 4 + + # mlp heads + self.mlp_dropout = 0.3 + + ### Matcher + self.matcher_giou_cost = 2. 
+ self.matcher_cls_cost = 1. + self.matcher_center_cost = 0. + self.matcher_objectness_cost = 0. + + ### Loss Weights + self.loss_giou_weight = 10. + self.loss_sem_cls_weight = 1. + self.loss_no_object_weight = 0.25 + self.loss_angle_cls_weight = 0.1 + self.loss_angle_reg_weight = 0.5 + self.loss_center_weight = 5. + self.loss_size_weight = 1. diff --git a/models/LL3DA/models/detector_Vote2Cap_DETR/criterion.py b/models/LL3DA/models/detector_Vote2Cap_DETR/criterion.py new file mode 100644 index 0000000..07fd335 --- /dev/null +++ b/models/LL3DA/models/detector_Vote2Cap_DETR/criterion.py @@ -0,0 +1,538 @@ +from typing import Dict + +import numpy as np +import torch +import torch.nn.functional as F +from scipy.optimize import linear_sum_assignment +from torch import Tensor, nn +from utils.box_util import generalized_box3d_iou +from utils.dist import all_reduce_average +from utils.misc import huber_loss + +GT_VOTE_FACTOR = 3 # number of GT votes per point + + +def nn_distance(pc1, + pc2, + l1smooth=False, + delta=1.0, + l1=False, + return_distance=False): + """ + Input: + pc1: (B,N,C) torch tensor + pc2: (B,M,C) torch tensor + l1smooth: bool, whether to use l1smooth loss + delta: scalar, the delta used in l1smooth loss + Output: + dist1: (B,N) torch float32 tensor + idx1: (B,N) torch int64 tensor + dist2: (B,M) torch float32 tensor + idx2: (B,M) torch int64 tensor + """ + N = pc1.shape[1] + M = pc2.shape[1] + pc1_expand_tile = pc1.unsqueeze(2).repeat(1, 1, M, 1) + pc2_expand_tile = pc2.unsqueeze(1).repeat(1, N, 1, 1) + pc_diff = pc1_expand_tile - pc2_expand_tile + + if l1smooth: + pc_dist = torch.sum(huber_loss(pc_diff, delta), dim=-1) # (B,N,M) + elif l1: + pc_dist = torch.sum(torch.abs(pc_diff), dim=-1) # (B,N,M) + else: + pc_dist = torch.sum(pc_diff**2, dim=-1) # (B,N,M) + dist1, idx1 = torch.min(pc_dist, dim=2) # (B,N) + dist2, idx2 = torch.min(pc_dist, dim=1) # (B,M) + if return_distance: + return dist1, idx1, dist2, idx2, pc_dist + else: + return dist1, idx1, dist2, idx2 + + +class Matcher(nn.Module): + + def __init__(self, cost_class, cost_objectness, cost_giou, cost_center): + """ + Parameters: + cost_class: + Returns: + + """ + super().__init__() + self.cost_class = cost_class + self.cost_objectness = cost_objectness + self.cost_giou = cost_giou + self.cost_center = cost_center + + @torch.no_grad() + def forward(self, outputs, targets): + + batchsize = outputs['sem_cls_prob'].shape[0] + nqueries = outputs['sem_cls_prob'].shape[1] + ngt = targets['gt_box_sem_cls_label'].shape[1] + nactual_gt = targets['nactual_gt'] + + # classification cost: batch x nqueries x ngt matrix + pred_cls_prob = outputs['sem_cls_prob'] + gt_box_sem_cls_labels = ( + targets['gt_box_sem_cls_label'].unsqueeze(1).expand( + batchsize, nqueries, ngt)) + class_mat = -torch.gather(pred_cls_prob, 2, gt_box_sem_cls_labels) + + # objectness cost: batch x nqueries x 1 + objectness_mat = -outputs['objectness_prob'].unsqueeze(-1) + + # center cost: batch x nqueries x ngt + center_mat = outputs['center_dist'].detach() + + # giou cost: batch x nqueries x ngt + giou_mat = -outputs['gious'].detach() + + final_cost = (self.cost_class * class_mat + + self.cost_objectness * objectness_mat + + self.cost_center * center_mat + + self.cost_giou * giou_mat) + + final_cost = final_cost.detach().cpu().numpy() + assignments = [] + + # auxiliary variables useful for batched loss computation + batch_size, nprop = final_cost.shape[0], final_cost.shape[1] + per_prop_gt_inds = torch.zeros([batch_size, nprop], + dtype=torch.int64, + 
device=pred_cls_prob.device) + proposal_matched_mask = torch.zeros([batch_size, nprop], + dtype=torch.float32, + device=pred_cls_prob.device) + for b in range(batchsize): + assign = [] + if nactual_gt[b] > 0: + assign = linear_sum_assignment( + final_cost[b, :, :nactual_gt[b]]) + assign = [ + torch.from_numpy(x).long().to(device=pred_cls_prob.device) + for x in assign + ] + per_prop_gt_inds[b, assign[0]] = assign[1] + proposal_matched_mask[b, assign[0]] = 1 + assignments.append(assign) + + return { + 'assignments': assignments, + 'per_prop_gt_inds': per_prop_gt_inds, + 'proposal_matched_mask': proposal_matched_mask, + } + + +class SetPredictionCriterion(nn.Module): + + def __init__(self, matcher, dataset_config, loss_weight_dict): + super(SetPredictionCriterion, self).__init__() + + self.dataset_config = dataset_config + self.matcher = matcher + self.loss_weight_dict = loss_weight_dict + + semcls_percls_weights = torch.ones(dataset_config.num_semcls + 1) + semcls_percls_weights[-1] = loss_weight_dict['loss_no_object_weight'] + del loss_weight_dict['loss_no_object_weight'] + self.register_buffer('semcls_percls_weights', semcls_percls_weights) + + self.loss_functions = { + 'loss_sem_cls': self.loss_sem_cls, + 'loss_angle': self.loss_angle, + 'loss_center': self.loss_center, + 'loss_size': self.loss_size, + 'loss_giou': self.loss_giou, + # this isn't used during training and is logged for debugging. + # thus, this loss does not have a loss_weight associated with it. + 'loss_cardinality': self.loss_cardinality, + } + + @torch.no_grad() + def loss_cardinality(self, outputs, targets, assignments): + # Count the number of predictions that are objects + # Cardinality is the error between predicted #objects and ground truth objects + + pred_logits = outputs['sem_cls_logits'] + # Count the number of predictions that are NOT "no-object" (which is the last class) + pred_objects = (pred_logits.argmax(-1) != + pred_logits.shape[-1] - 1).sum(1) + card_err = F.l1_loss(pred_objects.float(), targets['nactual_gt']) + return {'loss_cardinality': card_err} + + def loss_sem_cls(self, outputs, targets, assignments): + + # # Not vectorized version + # pred_logits = outputs["sem_cls_logits"] + # assign = assignments["assignments"] + + # sem_cls_targets = torch.ones((pred_logits.shape[0], pred_logits.shape[1]), + # dtype=torch.int64, device=pred_logits.device) + + # # initialize to background/no-object class + # sem_cls_targets *= (pred_logits.shape[-1] - 1) + + # # use assignments to compute labels for matched boxes + # for b in range(pred_logits.shape[0]): + # if len(assign[b]) > 0: + # sem_cls_targets[b, assign[b][0]] = targets["gt_box_sem_cls_label"][b, assign[b][1]] + + # sem_cls_targets = sem_cls_targets.view(-1) + # pred_logits = pred_logits.reshape(sem_cls_targets.shape[0], -1) + # loss = F.cross_entropy(pred_logits, sem_cls_targets, self.semcls_percls_weights, reduction="mean") + + pred_logits = outputs['sem_cls_logits'] + gt_box_label = torch.gather(targets['gt_box_sem_cls_label'], 1, + assignments['per_prop_gt_inds']) + gt_box_label[assignments['proposal_matched_mask'].int() == 0] = ( + pred_logits.shape[-1] - 1) + loss = F.cross_entropy( + pred_logits.transpose(2, 1), + gt_box_label, + self.semcls_percls_weights, + reduction='mean', + ) + + return {'loss_sem_cls': loss} + + def loss_angle(self, outputs, targets, assignments): + angle_logits = outputs['angle_logits'] + angle_residual = outputs['angle_residual_normalized'] + + if targets['num_boxes_replica'] > 0: + gt_angle_label = 
targets['gt_angle_class_label'] + gt_angle_residual = targets['gt_angle_residual_label'] + gt_angle_residual_normalized = gt_angle_residual / ( + np.pi / self.dataset_config.num_angle_bin) + + # # Non vectorized version + # assignments = assignments["assignments"] + # p_angle_logits = [] + # p_angle_resid = [] + # t_angle_labels = [] + # t_angle_resid = [] + + # for b in range(angle_logits.shape[0]): + # if len(assignments[b]) > 0: + # p_angle_logits.append(angle_logits[b, assignments[b][0]]) + # p_angle_resid.append(angle_residual[b, assignments[b][0], gt_angle_label[b][assignments[b][1]]]) + # t_angle_labels.append(gt_angle_label[b, assignments[b][1]]) + # t_angle_resid.append(gt_angle_residual_normalized[b, assignments[b][1]]) + + # p_angle_logits = torch.cat(p_angle_logits) + # p_angle_resid = torch.cat(p_angle_resid) + # t_angle_labels = torch.cat(t_angle_labels) + # t_angle_resid = torch.cat(t_angle_resid) + + # angle_cls_loss = F.cross_entropy(p_angle_logits, t_angle_labels, reduction="sum") + # angle_reg_loss = huber_loss(p_angle_resid.flatten() - t_angle_resid.flatten()).sum() + + gt_angle_label = torch.gather(gt_angle_label, 1, + assignments['per_prop_gt_inds']) + angle_cls_loss = F.cross_entropy(angle_logits.transpose(2, 1), + gt_angle_label, + reduction='none') + angle_cls_loss = (angle_cls_loss * + assignments['proposal_matched_mask']).sum() + + gt_angle_residual_normalized = torch.gather( + gt_angle_residual_normalized, 1, + assignments['per_prop_gt_inds']) + gt_angle_label_one_hot = torch.zeros_like(angle_residual, + dtype=torch.float32) + gt_angle_label_one_hot.scatter_(2, gt_angle_label.unsqueeze(-1), 1) + + angle_residual_for_gt_class = torch.sum( + angle_residual * gt_angle_label_one_hot, -1) + angle_reg_loss = huber_loss(angle_residual_for_gt_class - + gt_angle_residual_normalized, + delta=1.0) + angle_reg_loss = (angle_reg_loss * + assignments['proposal_matched_mask']).sum() + + angle_cls_loss /= targets['num_boxes'] + angle_reg_loss /= targets['num_boxes'] + else: + angle_cls_loss = torch.zeros(1, + device=angle_logits.device).squeeze() + angle_reg_loss = torch.zeros(1, + device=angle_logits.device).squeeze() + return { + 'loss_angle_cls': angle_cls_loss, + 'loss_angle_reg': angle_reg_loss + } + + def loss_center(self, outputs, targets, assignments): + center_dist = outputs['center_dist'] + if targets['num_boxes_replica'] > 0: + + # # Non vectorized version + # assign = assignments["assignments"] + # center_loss = torch.zeros(1, device=center_dist.device).squeeze() + # for b in range(center_dist.shape[0]): + # if len(assign[b]) > 0: + # center_loss += center_dist[b, assign[b][0], assign[b][1]].sum() + + # select appropriate distances by using proposal to gt matching + center_loss = torch.gather( + center_dist, 2, + assignments['per_prop_gt_inds'].unsqueeze(-1)).squeeze(-1) + # zero-out non-matched proposals + center_loss = center_loss * assignments['proposal_matched_mask'] + center_loss = center_loss.sum() + + if targets['num_boxes'] > 0: + center_loss /= targets['num_boxes'] + else: + center_loss = torch.zeros(1, device=center_dist.device).squeeze() + + return {'loss_center': center_loss} + + def loss_giou(self, outputs, targets, assignments): + gious_dist = 1 - outputs['gious'] + + # # Non vectorized version + # giou_loss = torch.zeros(1, device=gious_dist.device).squeeze() + # assign = assignments["assignments"] + + # for b in range(gious_dist.shape[0]): + # if len(assign[b]) > 0: + # giou_loss += gious_dist[b, assign[b][0], assign[b][1]].sum() + + # select 
appropriate gious by using proposal to gt matching + giou_loss = torch.gather( + gious_dist, 2, + assignments['per_prop_gt_inds'].unsqueeze(-1)).squeeze(-1) + # zero-out non-matched proposals + giou_loss = giou_loss * assignments['proposal_matched_mask'] + giou_loss = giou_loss.sum() + + if targets['num_boxes'] > 0: + giou_loss /= targets['num_boxes'] + + return {'loss_giou': giou_loss} + + def loss_size(self, outputs, targets, assignments): + gt_box_sizes = targets['gt_box_sizes_normalized'] + pred_box_sizes = outputs['size_normalized'] + + if targets['num_boxes_replica'] > 0: + + # # Non vectorized version + # p_sizes = [] + # t_sizes = [] + # assign = assignments["assignments"] + # for b in range(pred_box_sizes.shape[0]): + # if len(assign[b]) > 0: + # p_sizes.append(pred_box_sizes[b, assign[b][0]]) + # t_sizes.append(gt_box_sizes[b, assign[b][1]]) + # p_sizes = torch.cat(p_sizes) + # t_sizes = torch.cat(t_sizes) + # size_loss = F.l1_loss(p_sizes, t_sizes, reduction="sum") + + # construct gt_box_sizes as [batch x nprop x 3] matrix by using proposal to gt matching + gt_box_sizes = torch.stack( + [ + torch.gather(gt_box_sizes[:, :, x], 1, + assignments['per_prop_gt_inds']) + for x in range(gt_box_sizes.shape[-1]) + ], + dim=-1, + ) + size_loss = F.l1_loss(pred_box_sizes, + gt_box_sizes, + reduction='none').sum(dim=-1) + + # zero-out non-matched proposals + size_loss *= assignments['proposal_matched_mask'] + size_loss = size_loss.sum() + + size_loss /= targets['num_boxes'] + else: + size_loss = torch.zeros(1, device=pred_box_sizes.device).squeeze() + return {'loss_size': size_loss} + + def single_output_forward(self, outputs, targets): + gious = generalized_box3d_iou( + outputs['box_corners'], + targets['gt_box_corners'], + targets['nactual_gt'], + rotated_boxes=torch.any(targets['gt_box_angles'] > 0).item(), + needs_grad=(self.loss_weight_dict['loss_giou_weight'] > 0), + ) + + outputs['gious'] = gious + center_dist = torch.cdist(outputs['center_normalized'], + targets['gt_box_centers_normalized'], + p=1) + outputs['center_dist'] = center_dist + assignments = self.matcher(outputs, targets) + + losses = {} + + for k in self.loss_functions: + loss_wt_key = k + '_weight' + if (loss_wt_key in self.loss_weight_dict + and self.loss_weight_dict[loss_wt_key] > 0 + ) or loss_wt_key not in self.loss_weight_dict: + # only compute losses with loss_wt > 0 + # certain losses like cardinality are only logged and have no loss weight + curr_loss = self.loss_functions[k](outputs, targets, + assignments) + losses.update(curr_loss) + + final_loss = 0 + for k in self.loss_weight_dict: + if self.loss_weight_dict[k] > 0: + losses[k.replace('_weight', '')] *= self.loss_weight_dict[k] + final_loss += losses[k.replace('_weight', '')] + return assignments, final_loss, losses + + def forward(self, outputs, targets): + nactual_gt = targets['gt_box_present'].sum(axis=1).long() + num_boxes = torch.clamp(all_reduce_average(nactual_gt.sum()), + min=1).item() + targets['nactual_gt'] = nactual_gt + targets['num_boxes'] = num_boxes + targets['num_boxes_replica'] = nactual_gt.sum().item( + ) # number of boxes on this worker for dist training + + assignments, loss, loss_dict = self.single_output_forward( + outputs['outputs'], targets) + + if 'aux_outputs' in outputs: + for k in range(len(outputs['aux_outputs'])): + _, interm_loss, interm_loss_dict = self.single_output_forward( + outputs['aux_outputs'][k], targets) + + loss += interm_loss + for interm_key in interm_loss_dict: + loss_dict[f'{interm_key}_{k}'] = interm_loss_dict[ 
+                        interm_key]
+        return assignments, loss, loss_dict
+
+
+class VoteQueryCriterion(nn.Module):
+
+    def __init__(self, cfgs):
+        super(VoteQueryCriterion, self).__init__()
+        self.loss_dict = {
+            'loss_vote': (self.loss_vote, 1),
+        }
+
+    def loss_vote(self, outputs: Dict, targets: Dict) -> Dict:
+        """Compute vote loss: match predicted votes to GT votes.
+
+        Args:
+            outputs: dict of network predictions (read-only)
+            targets: dict of ground-truth labels (read-only)
+
+        Returns:
+            vote_loss: scalar Tensor
+
+        Overall idea:
+            If the seed point belongs to an object (votes_label_mask == 1),
+            then we require it to vote for the object center.
+
+            Each seed point may vote for multiple translations v1,v2,v3
+            A seed point may also be in the boxes of multiple objects:
+            o1,o2,o3 with corresponding GT votes c1,c2,c3
+
+            Then the loss for this seed point is:
+                min(d(v_i,c_j)) for i=1,2,3 and j=1,2,3
+        """
+
+        # Load ground truth votes and assign them to seed points
+        batch, num_seed, _ = outputs['seed_xyz'].shape
+        vote_xyz = outputs['vote_xyz']  # B,num_seed*vote_factor,3
+        seed_inds = outputs['seed_inds'].long(
+        )  # B,num_seed in [0,num_points-1]
+
+        # Get ground-truth votes for the seed points
+        # vote_label_mask: Use gather to select B,num_seed from B,num_point
+        #   non-object point has no GT vote mask = 0, object point has mask = 1
+        # vote_label: Use gather to select B,num_seed,9 from B,num_point,9
+        #   with inds in shape B,num_seed,9 and 9 = GT_VOTE_FACTOR * 3
+        seed_gt_votes_mask = torch.gather(targets['vote_label_mask'], 1,
+                                          seed_inds)
+        seed_inds_expand = seed_inds.view(batch, num_seed,
+                                          1).repeat(1, 1, 3 * GT_VOTE_FACTOR)
+        seed_gt_votes = torch.gather(targets['vote_label'], 1,
+                                     seed_inds_expand)
+        seed_gt_votes += outputs['seed_xyz'].repeat(1, 1, 3)
+
+        # Compute the min-of-min distance between predicted and GT votes
+        vote_xyz_reshape = vote_xyz.view(batch * num_seed, -1, 3)
+        seed_gt_votes_reshape = seed_gt_votes.view(batch * num_seed,
+                                                   GT_VOTE_FACTOR, 3)
+        # A predicted vote to nowhere is not penalized as long as there is a
+        # good vote near the GT vote.
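+        # Shape sketch (assuming vote_factor == 1, i.e. one vote per seed):
+        #   vote_xyz_reshape:      (batch * num_seed, 1, 3)
+        #   seed_gt_votes_reshape: (batch * num_seed, GT_VOTE_FACTOR, 3)
+        # nn_distance's dist2 below is then (batch * num_seed, GT_VOTE_FACTOR):
+        # for each GT vote, the L1 distance to the nearest predicted vote;
+        # the torch.min over dim=1 afterwards keeps only the best-matched GT
+        # vote per seed, which is the min-of-min from the docstring above.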
+ dist1, _, dist2, _ = nn_distance(vote_xyz_reshape, + seed_gt_votes_reshape, + l1=True) + votes_dist, _ = torch.min(dist2, dim=1) + votes_dist = votes_dist.view(batch, num_seed) + + loss = torch.sum(votes_dist * seed_gt_votes_mask.float()) / ( + torch.sum(seed_gt_votes_mask.float()) + 1e-6) + + return {'loss_vote': loss} + + def forward(self, outputs: Dict, targets: Dict) -> Dict: + # assignments = self.compute_label_assignment(outputs, targets) + + loss = torch.zeros(1)[0].to(targets['point_clouds'].device) + loss_dict = {} + loss_intermidiate = {} + for loss_name, (loss_fn, loss_weight) in self.loss_dict.items(): + + # loss_intermidiate = loss_fn(outputs, targets, assignments) + loss_intermidiate = loss_fn(outputs, targets) + loss_dict.update(loss_intermidiate) + + loss += loss_weight * loss_intermidiate[loss_name] + + loss *= 10 + + return loss, loss_intermidiate + + +class OverallCriterion(nn.Module): + + def __init__(self, args, dataset_config): + super(OverallCriterion, self).__init__() + matcher = Matcher( + cost_class=args.matcher_cls_cost, + cost_giou=args.matcher_giou_cost, + cost_center=args.matcher_center_cost, + cost_objectness=args.matcher_objectness_cost, + ) + loss_weight_dict = { + 'loss_giou_weight': args.loss_giou_weight, + 'loss_sem_cls_weight': args.loss_sem_cls_weight, + 'loss_no_object_weight': args.loss_no_object_weight, + 'loss_angle_cls_weight': args.loss_angle_cls_weight, + 'loss_angle_reg_weight': args.loss_angle_reg_weight, + 'loss_center_weight': args.loss_center_weight, + 'loss_size_weight': args.loss_size_weight, + } + self.set_prediction_loss = SetPredictionCriterion( + matcher, dataset_config, loss_weight_dict) + self.vote_query_loss = VoteQueryCriterion(args) + + def forward(self, votenet_outputs: Dict, decoder_outputs: Dict, + targets: Dict) -> Dict: + loss_dict = {} + votenet_loss, votenet_loss_dict = self.vote_query_loss( + votenet_outputs, targets) + assignments, set_loss, set_loss_dict = self.set_prediction_loss( + decoder_outputs, targets) + + loss_dict.update(votenet_loss_dict) + loss_dict.update(set_loss_dict) + + return assignments, votenet_loss + set_loss, loss_dict + + +def build_criterion(args, dataset_config): + return OverallCriterion(args, dataset_config) diff --git a/models/LL3DA/models/detector_Vote2Cap_DETR/detector.py b/models/LL3DA/models/detector_Vote2Cap_DETR/detector.py new file mode 100644 index 0000000..3653422 --- /dev/null +++ b/models/LL3DA/models/detector_Vote2Cap_DETR/detector.py @@ -0,0 +1,428 @@ +import math +import os +from functools import partial +from typing import Dict + +import numpy as np +import torch +import torch.nn as nn +from datasets.scannet import BASE +from models.detector_Vote2Cap_DETR.config import model_config +from models.detector_Vote2Cap_DETR.criterion import build_criterion +from models.detector_Vote2Cap_DETR.helpers import GenericMLP +from models.detector_Vote2Cap_DETR.position_embedding import \ + PositionEmbeddingCoordsSine +from models.detector_Vote2Cap_DETR.transformer import ( + MaskedTransformerEncoder, TransformerDecoder, TransformerDecoderLayer, + TransformerEncoder, TransformerEncoderLayer) +from models.detector_Vote2Cap_DETR.vote_query import VoteQuery +from third_party.pointnet2.pointnet2_modules import PointnetSAModuleVotes +from third_party.pointnet2.pointnet2_utils import furthest_point_sample +from utils.misc import huber_loss +from utils.pc_util import scale_points, shift_scale_points + + +class BoxProcessor(object): + """Class to convert 3DETR MLP head outputs into bounding boxes.""" 
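+    # Per-decoder-layer conversion implemented by the methods below (a sketch
+    # of the data flow, not additional computation):
+    #   center  = query_xyz + predicted_offset       (compute_predicted_center)
+    #   size    = sigmoid(raw_size) * scene_scale    (compute_predicted_size)
+    #   angle   = bin_center(argmax(angle_logits)) + residual,
+    #             wrapped to (-pi, pi]               (compute_predicted_angle)
+    #   corners = box_parametrization_to_corners(center, size, angle)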
+ + def __init__(self, dataset_config): + self.dataset_config = dataset_config + + def compute_predicted_center(self, center_offset, query_xyz, + point_cloud_dims): + center_unnormalized = query_xyz + center_offset + center_normalized = shift_scale_points(center_unnormalized, + src_range=point_cloud_dims) + return center_normalized, center_unnormalized + + def compute_predicted_size(self, size_normalized, point_cloud_dims): + scene_scale = point_cloud_dims[1] - point_cloud_dims[0] + scene_scale = torch.clamp(scene_scale, min=1e-1) + size_unnormalized = scale_points(size_normalized, + mult_factor=scene_scale) + return size_unnormalized + + def compute_predicted_angle(self, angle_logits, angle_residual): + if angle_logits.shape[-1] == 1: + # special case for datasets with no rotation angle + # we still use the predictions so that model outputs are used + # in the backwards pass (DDP may complain otherwise) + angle = angle_logits * 0 + angle_residual * 0 + angle = angle.squeeze(-1).clamp(min=0) + else: + angle_per_cls = 2 * np.pi / self.dataset_config.num_angle_bin + pred_angle_class = angle_logits.argmax(dim=-1).detach() + angle_center = angle_per_cls * pred_angle_class + angle = angle_center + angle_residual.gather( + 2, pred_angle_class.unsqueeze(-1)).squeeze(-1) + mask = angle > np.pi + angle[mask] = angle[mask] - 2 * np.pi + return angle + + def compute_objectness_and_cls_prob(self, cls_logits): + assert cls_logits.shape[-1] == self.dataset_config.num_semcls + 1 + cls_prob = torch.nn.functional.softmax(cls_logits, dim=-1) + objectness_prob = 1 - cls_prob[..., -1] + return cls_prob[..., :-1], objectness_prob + + def box_parametrization_to_corners(self, box_center_unnorm, + box_size_unnorm, box_angle): + return self.dataset_config.box_parametrization_to_corners( + box_center_unnorm, box_size_unnorm, box_angle) + + +class Model_Vote2Cap_DETR(nn.Module): + + def __init__(self, + tokenizer, + encoder, + decoder, + dataset_config, + encoder_dim=256, + decoder_dim=256, + position_embedding='fourier', + mlp_dropout=0.3, + num_queries=256, + criterion=None): + super().__init__() + + self.tokenizer = tokenizer + self.encoder = encoder + + if hasattr(self.encoder, 'masking_radius'): + hidden_dims = [encoder_dim] + else: + hidden_dims = [encoder_dim, encoder_dim] + + self.encoder_to_decoder_projection = GenericMLP( + input_dim=encoder_dim, + hidden_dims=hidden_dims, + output_dim=decoder_dim, + norm_fn_name='bn1d', + activation='relu', + use_conv=True, + output_use_activation=True, + output_use_norm=True, + output_use_bias=False, + ) + self.pos_embedding = PositionEmbeddingCoordsSine( + d_pos=decoder_dim, pos_type=position_embedding, normalize=True) + + self.vote_query_generator = VoteQuery(decoder_dim, num_queries) + + self.query_projection = GenericMLP( + input_dim=decoder_dim, + hidden_dims=[decoder_dim], + output_dim=decoder_dim, + use_conv=True, + output_use_activation=True, + hidden_use_bias=True, + ) + + self.decoder = decoder + self.build_mlp_heads(dataset_config, decoder_dim, mlp_dropout) + + self.box_processor = BoxProcessor(dataset_config) + self.criterion = criterion + + def build_mlp_heads(self, dataset_config, decoder_dim, mlp_dropout): + mlp_func = partial( + GenericMLP, + norm_fn_name='bn1d', + activation='relu', + use_conv=True, + hidden_dims=[decoder_dim, decoder_dim], + dropout=mlp_dropout, + input_dim=decoder_dim, + ) + + # Semantic class of the box + # add 1 for background/not-an-object class + semcls_head = mlp_func(output_dim=dataset_config.num_semcls + 1) + + # geometry of the 
box + center_head = mlp_func(output_dim=3) + size_head = mlp_func(output_dim=3) + angle_cls_head = mlp_func(output_dim=dataset_config.num_angle_bin) + angle_reg_head = mlp_func(output_dim=dataset_config.num_angle_bin) + + mlp_heads = [ + ('sem_cls_head', semcls_head), + ('center_head', center_head), + ('size_head', size_head), + ('angle_cls_head', angle_cls_head), + ('angle_residual_head', angle_reg_head), + ] + self.mlp_heads = nn.ModuleDict(mlp_heads) + + def _break_up_pc(self, pc): + # pc may contain color/normals. + xyz = pc[..., 0:3].contiguous() + features = pc[..., 3:].transpose( + 1, 2).contiguous() if pc.size(-1) > 3 else None + return xyz, features + + def run_encoder(self, point_clouds): + xyz, features = self._break_up_pc(point_clouds) + + ## pointcloud tokenization + # xyz: batch x npoints x 3 + # features: batch x channel x npoints + # inds: batch x npoints + pre_enc_xyz, pre_enc_features, pre_enc_inds = self.tokenizer( + xyz, features) + + # nn.MultiHeadAttention in encoder expects npoints x batch x channel features + pre_enc_features = pre_enc_features.permute(2, 0, 1) + + # xyz points are in batch x npointx channel order + enc_xyz, enc_features, enc_inds = self.encoder(pre_enc_features, + xyz=pre_enc_xyz) + if enc_inds is None: + # encoder does not perform any downsampling + enc_inds = pre_enc_inds + else: + # use gather here to ensure that it works for both FPS and random sampling + enc_inds = torch.gather(pre_enc_inds, 1, enc_inds.long()) + return enc_xyz, enc_features, enc_inds + + def get_box_predictions(self, query_xyz, point_cloud_dims, box_features): + """ + Parameters: + query_xyz: batch x nqueries x 3 tensor of query XYZ coords + point_cloud_dims: List of [min, max] dims of point cloud + min: batch x 3 tensor of min XYZ coords + max: batch x 3 tensor of max XYZ coords + box_features: num_layers x num_queries x batch x channel + """ + # box_features change to (num_layers x batch) x channel x num_queries + box_features = box_features.permute(0, 2, 3, 1) + num_layers, batch, channel, num_queries = ( + box_features.shape[0], + box_features.shape[1], + box_features.shape[2], + box_features.shape[3], + ) + box_features = box_features.reshape(num_layers * batch, channel, + num_queries) + + # mlp head outputs are (num_layers x batch) x noutput x nqueries, so transpose last two dims + cls_logits = self.mlp_heads['sem_cls_head'](box_features).transpose( + 1, 2) + center_offset = (self.mlp_heads['center_head'] + (box_features).sigmoid().transpose(1, 2) - 0.5) + size_normalized = ( + self.mlp_heads['size_head'](box_features).sigmoid().transpose( + 1, 2)) + angle_logits = self.mlp_heads['angle_cls_head']( + box_features).transpose(1, 2) + angle_residual_normalized = self.mlp_heads['angle_residual_head']( + box_features).transpose(1, 2) + + # reshape outputs to num_layers x batch x nqueries x noutput + cls_logits = cls_logits.reshape(num_layers, batch, num_queries, -1) + center_offset = center_offset.reshape(num_layers, batch, num_queries, + -1) + size_normalized = size_normalized.reshape(num_layers, batch, + num_queries, -1) + angle_logits = angle_logits.reshape(num_layers, batch, num_queries, -1) + angle_residual_normalized = angle_residual_normalized.reshape( + num_layers, batch, num_queries, -1) + angle_residual = angle_residual_normalized * ( + np.pi / angle_residual_normalized.shape[-1]) + + outputs = [] + for l in range(num_layers): + # box processor converts outputs so we can get a 3D bounding box + ( + center_normalized, + center_unnormalized, + ) = 
self.box_processor.compute_predicted_center( + center_offset[l], query_xyz, point_cloud_dims) + angle_continuous = self.box_processor.compute_predicted_angle( + angle_logits[l], angle_residual[l]) + size_unnormalized = self.box_processor.compute_predicted_size( + size_normalized[l], point_cloud_dims) + box_corners = self.box_processor.box_parametrization_to_corners( + center_unnormalized, size_unnormalized, angle_continuous) + + # below are not used in computing loss (only for matching/mAP eval) + # we compute them with no_grad() so that distributed training does not complain about unused variables + with torch.no_grad(): + ( + semcls_prob, + objectness_prob, + ) = self.box_processor.compute_objectness_and_cls_prob( + cls_logits[l]) + + box_prediction = { + 'sem_cls_logits': cls_logits[l], + 'center_normalized': center_normalized.contiguous(), + 'center_unnormalized': center_unnormalized, + 'size_normalized': size_normalized[l], + 'size_unnormalized': size_unnormalized, + 'angle_logits': angle_logits[l], + 'angle_residual': angle_residual[l], + 'angle_residual_normalized': angle_residual_normalized[l], + 'angle_continuous': angle_continuous, + 'objectness_prob': objectness_prob, + 'sem_cls_prob': semcls_prob, + 'box_corners': box_corners, + } + outputs.append(box_prediction) + + # intermediate decoder layer outputs are only used during training + aux_outputs = outputs[:-1] + outputs = outputs[-1] + + return { + 'outputs': outputs, # output from last layer of decoder + 'aux_outputs': + aux_outputs, # output from intermediate layers of decoder + } + + def forward(self, inputs, is_eval: bool = False): + + # only need the pcd as input + + point_clouds = inputs['point_clouds'] + point_cloud_dims = [ + inputs['point_cloud_dims_min'], + inputs['point_cloud_dims_max'], + ] + + ## feature encoding + # encoder features: npoints x batch x channel -> batch x channel x npoints + enc_xyz, enc_features, enc_inds = self.run_encoder(point_clouds) + enc_features = enc_features.permute(1, 2, 0) + + ## vote query generation + query_outputs = self.vote_query_generator(enc_xyz, enc_features) + query_outputs['seed_inds'] = enc_inds + query_xyz = query_outputs['query_xyz'] + query_features = query_outputs['query_features'] + + ## decoding + pos_embed = self.pos_embedding(query_xyz, input_range=point_cloud_dims) + query_embed = self.query_projection(pos_embed) + + # batch x channel x npenc + enc_features = self.encoder_to_decoder_projection(enc_features) + enc_pos = self.pos_embedding(enc_xyz, input_range=point_cloud_dims) + + # decoder expects: npoints x batch x channel + enc_features = enc_features.permute(2, 0, 1) + enc_pos = enc_pos.permute(2, 0, 1) + query_embed = query_embed.permute(2, 0, 1) + tgt = query_features.permute(2, 0, 1) + + box_features = self.decoder( + tgt, enc_features, query_pos=query_embed, + pos=enc_pos)[0] # nlayers x nqueries x batch x channel + + box_predictions = self.get_box_predictions(query_xyz, point_cloud_dims, + box_features) + + if self.criterion is not None and is_eval is False: + (box_predictions['outputs']['assignments'], + box_predictions['outputs']['loss'], + _) = self.criterion(query_outputs, box_predictions, inputs) + + box_predictions['outputs'].update({ + 'prop_features': + box_features.permute(0, 2, 1, + 3), # nlayers x batch x nqueries x channel + 'enc_features': + enc_features.permute(1, 0, 2), # batch x npoints x channel + 'enc_xyz': + enc_xyz, # batch x npoints x 3 + 'query_xyz': + query_xyz, # batch x nqueries x 3 + }) + + return box_predictions['outputs'] + + +def 
build_preencoder(cfg): + mlp_dims = [cfg.in_channel, 64, 128, cfg.enc_dim] + preencoder = PointnetSAModuleVotes( + radius=0.2, + nsample=64, + npoint=cfg.preenc_npoints, + mlp=mlp_dims, + normalize_xyz=True, + ) + return preencoder + + +def build_encoder(cfg): + if cfg.enc_type == 'vanilla': + encoder_layer = TransformerEncoderLayer( + d_model=cfg.enc_dim, + nhead=cfg.enc_nhead, + dim_feedforward=cfg.enc_ffn_dim, + dropout=cfg.enc_dropout, + activation=cfg.enc_activation, + ) + encoder = TransformerEncoder(encoder_layer=encoder_layer, + num_layers=cfg.enc_nlayers) + elif cfg.enc_type in ['masked']: + encoder_layer = TransformerEncoderLayer( + d_model=cfg.enc_dim, + nhead=cfg.enc_nhead, + dim_feedforward=cfg.enc_ffn_dim, + dropout=cfg.enc_dropout, + activation=cfg.enc_activation, + ) + interim_downsampling = PointnetSAModuleVotes( + radius=0.4, + nsample=32, + npoint=cfg.preenc_npoints // 2, + mlp=[cfg.enc_dim, 256, 256, cfg.enc_dim], + normalize_xyz=True, + ) + + masking_radius = [math.pow(x, 2) for x in [0.4, 0.8, 1.2]] + encoder = MaskedTransformerEncoder( + encoder_layer=encoder_layer, + num_layers=3, + interim_downsampling=interim_downsampling, + masking_radius=masking_radius, + ) + else: + raise ValueError(f'Unknown encoder type {cfg.enc_type}') + return encoder + + +def build_decoder(cfg): + decoder_layer = TransformerDecoderLayer( + d_model=cfg.dec_dim, + nhead=cfg.dec_nhead, + dim_feedforward=cfg.dec_ffn_dim, + dropout=cfg.dec_dropout, + ) + decoder = TransformerDecoder(decoder_layer, + num_layers=cfg.dec_nlayers, + return_intermediate=True) + return decoder + + +def detector(args, dataset_config): + cfg = model_config(args, dataset_config) + + tokenizer = build_preencoder(cfg) + encoder = build_encoder(cfg) + decoder = build_decoder(cfg) + + criterion = build_criterion(cfg, dataset_config) + + model = Model_Vote2Cap_DETR(tokenizer, + encoder, + decoder, + cfg.dataset_config, + encoder_dim=cfg.enc_dim, + decoder_dim=cfg.dec_dim, + mlp_dropout=cfg.mlp_dropout, + num_queries=cfg.nqueries, + criterion=criterion) + return model diff --git a/models/LL3DA/models/detector_Vote2Cap_DETR/helpers.py b/models/LL3DA/models/detector_Vote2Cap_DETR/helpers.py new file mode 100644 index 0000000..e35763d --- /dev/null +++ b/models/LL3DA/models/detector_Vote2Cap_DETR/helpers.py @@ -0,0 +1,115 @@ +import copy +from functools import partial + +import torch.nn as nn + + +class BatchNormDim1Swap(nn.BatchNorm1d): + """Used for nn.Transformer that uses a HW x N x C rep.""" + + def forward(self, x): + """ + x: HW x N x C + permute to N x C x HW + Apply BN on C + permute back + """ + hw, n, c = x.shape + x = x.permute(1, 2, 0) + x = super(BatchNormDim1Swap, self).forward(x) + # x: n x c x hw -> hw x n x c + x = x.permute(2, 0, 1) + return x + + +NORM_DICT = { + 'bn': BatchNormDim1Swap, + 'bn1d': nn.BatchNorm1d, + 'id': nn.Identity, + 'ln': nn.LayerNorm, +} + +ACTIVATION_DICT = { + 'relu': nn.ReLU, + 'gelu': nn.GELU, + 'leakyrelu': partial(nn.LeakyReLU, negative_slope=0.1), +} + +WEIGHT_INIT_DICT = { + 'xavier_uniform': nn.init.xavier_uniform_, +} + + +class GenericMLP(nn.Module): + + def __init__( + self, + input_dim, + hidden_dims, + output_dim, + norm_fn_name=None, + activation='relu', + use_conv=False, + dropout=None, + hidden_use_bias=False, + output_use_bias=True, + output_use_activation=False, + output_use_norm=False, + weight_init_name=None, + ): + super().__init__() + activation = ACTIVATION_DICT[activation] + norm = None + if norm_fn_name is not None: + norm = NORM_DICT[norm_fn_name] + if 
norm_fn_name == 'ln' and use_conv: + norm = lambda x: nn.GroupNorm(1, x) # easier way to use LayerNorm + + if dropout is not None: + if not isinstance(dropout, list): + dropout = [dropout for _ in range(len(hidden_dims))] + + layers = [] + prev_dim = input_dim + for idx, x in enumerate(hidden_dims): + if use_conv: + layer = nn.Conv1d(prev_dim, x, 1, bias=hidden_use_bias) + else: + layer = nn.Linear(prev_dim, x, bias=hidden_use_bias) + layers.append(layer) + if norm: + layers.append(norm(x)) + layers.append(activation()) + if dropout is not None: + layers.append(nn.Dropout(p=dropout[idx])) + prev_dim = x + if use_conv: + layer = nn.Conv1d(prev_dim, output_dim, 1, bias=output_use_bias) + else: + layer = nn.Linear(prev_dim, output_dim, bias=output_use_bias) + layers.append(layer) + + if output_use_norm: + layers.append(norm(output_dim)) + + if output_use_activation: + layers.append(activation()) + + self.layers = nn.Sequential(*layers) + + if weight_init_name is not None: + self.do_weight_init(weight_init_name) + + def do_weight_init(self, weight_init_name): + func = WEIGHT_INIT_DICT[weight_init_name] + for (_, param) in self.named_parameters(): + if param.dim() > 1: # skips batchnorm/layernorm + func(param) + + def forward(self, x): + output = self.layers(x) + return output + + +def get_clones(module, N): + return nn.ModuleList([copy.deepcopy(module) for i in range(N)]) diff --git a/models/LL3DA/models/detector_Vote2Cap_DETR/position_embedding.py b/models/LL3DA/models/detector_Vote2Cap_DETR/position_embedding.py new file mode 100644 index 0000000..bf8c163 --- /dev/null +++ b/models/LL3DA/models/detector_Vote2Cap_DETR/position_embedding.py @@ -0,0 +1,139 @@ +"""Various positional encodings for the transformer.""" +import math + +import numpy as np +import torch +from torch import nn +from utils.pc_util import shift_scale_points + + +class PositionEmbeddingCoordsSine(nn.Module): + + def __init__( + self, + temperature=10000, + normalize=False, + scale=None, + pos_type='fourier', + d_pos=None, + d_in=3, + gauss_scale=1.0, + ): + super().__init__() + self.temperature = temperature + self.normalize = normalize + if scale is not None and normalize is False: + raise ValueError('normalize should be True if scale is passed') + if scale is None: + scale = 2 * math.pi + assert pos_type in ['sine', 'fourier'] + self.pos_type = pos_type + self.scale = scale + if pos_type == 'fourier': + assert d_pos is not None + assert d_pos % 2 == 0 + # define a gaussian matrix input_ch -> output_ch + B = torch.empty((d_in, d_pos // 2)).normal_() + B *= gauss_scale + self.register_buffer('gauss_B', B) + self.d_pos = d_pos + + def get_sine_embeddings(self, xyz, num_channels, input_range): + # clone coords so that shift/scale operations do not affect original tensor + orig_xyz = xyz + xyz = orig_xyz.clone() + + ncoords = xyz.shape[1] + if self.normalize: + xyz = shift_scale_points(xyz, src_range=input_range) + + ndim = num_channels // xyz.shape[2] + if ndim % 2 != 0: + ndim -= 1 + # automatically handle remainder by assiging it to the first dim + rems = num_channels - (ndim * xyz.shape[2]) + + assert ( + ndim % 2 == 0 + ), f'Cannot handle odd sized ndim={ndim} where num_channels={num_channels} and xyz={xyz.shape}' + + final_embeds = [] + prev_dim = 0 + + for d in range(xyz.shape[2]): + cdim = ndim + if rems > 0: + # add remainder in increments of two to maintain even size + cdim += 2 + rems -= 2 + + if cdim != prev_dim: + dim_t = torch.arange(cdim, + dtype=torch.float32, + device=xyz.device) + dim_t = 
self.temperature**(2 * (dim_t // 2) / cdim) + + # create batch x cdim x nccords embedding + raw_pos = xyz[:, :, d] + if self.scale: + raw_pos *= self.scale + pos = raw_pos[:, :, None] / dim_t + pos = torch.stack((pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()), + dim=3).flatten(2) + final_embeds.append(pos) + prev_dim = cdim + + final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1) + return final_embeds + + def get_fourier_embeddings(self, xyz, num_channels=None, input_range=None): + # Follows - https://people.eecs.berkeley.edu/~bmild/fourfeat/index.html + + if num_channels is None: + num_channels = self.gauss_B.shape[1] * 2 + + bsize, npoints = xyz.shape[0], xyz.shape[1] + assert num_channels > 0 and num_channels % 2 == 0 + d_in, max_d_out = self.gauss_B.shape[0], self.gauss_B.shape[1] + d_out = num_channels // 2 + assert d_out <= max_d_out + assert d_in == xyz.shape[-1] + + # clone coords so that shift/scale operations do not affect original tensor + orig_xyz = xyz + xyz = orig_xyz.clone() + + ncoords = xyz.shape[1] + if self.normalize: + xyz = shift_scale_points(xyz, src_range=input_range) + + xyz *= 2 * np.pi + xyz_proj = torch.mm(xyz.view(-1, d_in), self.gauss_B[:, :d_out]).view( + bsize, npoints, d_out) + final_embeds = [xyz_proj.sin(), xyz_proj.cos()] + + # return batch x d_pos x npoints embedding + final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1) + return final_embeds + + def forward(self, xyz, num_channels=None, input_range=None): + assert isinstance(xyz, torch.Tensor) + assert xyz.ndim == 3 + # xyz is batch x npoints x 3 + if self.pos_type == 'sine': + with torch.no_grad(): + return self.get_sine_embeddings(xyz, num_channels, input_range) + elif self.pos_type == 'fourier': + with torch.no_grad(): + return self.get_fourier_embeddings(xyz, num_channels, + input_range) + else: + raise ValueError(f'Unknown {self.pos_type}') + + def extra_repr(self): + st = f'type={self.pos_type}, scale={self.scale}, normalize={self.normalize}' + if hasattr(self, 'gauss_B'): + st += ( + f', gaussB={self.gauss_B.shape}, gaussBsum={self.gauss_B.sum().item()}' + ) + return st diff --git a/models/LL3DA/models/detector_Vote2Cap_DETR/transformer.py b/models/LL3DA/models/detector_Vote2Cap_DETR/transformer.py new file mode 100644 index 0000000..dffa648 --- /dev/null +++ b/models/LL3DA/models/detector_Vote2Cap_DETR/transformer.py @@ -0,0 +1,475 @@ +"""Modified from DETR Transformer class. 
+ +Copy-paste from torch.nn.Transformer with modifications: + * positional encodings are passed in MHattention + * extra LN at the end of encoder is removed + * decoder returns a stack of activations from all decoding layers +""" +from typing import Optional + +import torch +from models.detector_Vote2Cap_DETR.helpers import (ACTIVATION_DICT, NORM_DICT, + WEIGHT_INIT_DICT, + get_clones) +from torch import Tensor, nn + + +class TransformerEncoder(nn.Module): + + def __init__(self, + encoder_layer, + num_layers, + norm=None, + weight_init_name='xavier_uniform'): + super().__init__() + self.layers = get_clones(encoder_layer, num_layers) + self.num_layers = num_layers + self.norm = norm + self._reset_parameters(weight_init_name) + + def _reset_parameters(self, weight_init_name): + func = WEIGHT_INIT_DICT[weight_init_name] + for p in self.parameters(): + if p.dim() > 1: + func(p) + + def forward( + self, + src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + xyz: Optional[Tensor] = None, + transpose_swap: Optional[bool] = False, + ): + if transpose_swap: + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + if pos is not None: + pos = pos.flatten(2).permute(2, 0, 1) + output = src + orig_mask = mask + if orig_mask is not None and isinstance(orig_mask, list): + assert len(orig_mask) == len(self.layers) + elif orig_mask is not None: + orig_mask = [mask for _ in range(len(self.layers))] + + for idx, layer in enumerate(self.layers): + if orig_mask is not None: + mask = orig_mask[idx] + # mask must be tiled to num_heads of the transformer + bsz, n, n = mask.shape + nhead = layer.nhead + mask = mask.unsqueeze(1) + mask = mask.repeat(1, nhead, 1, 1) + mask = mask.view(bsz * nhead, n, n) + output = layer(output, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + pos=pos) + + if self.norm is not None: + output = self.norm(output) + + if transpose_swap: + output = output.permute(1, 2, 0).view(bs, c, h, w).contiguous() + + xyz_inds = None + + return xyz, output, xyz_inds + + +class TransformerDecoder(nn.Module): + + def __init__(self, + decoder_layer, + num_layers, + norm_fn_name='ln', + return_intermediate=False, + weight_init_name='xavier_uniform'): + super().__init__() + self.layers = get_clones(decoder_layer, num_layers) + self.num_layers = num_layers + self.norm = None + if norm_fn_name is not None: + self.norm = NORM_DICT[norm_fn_name]( + self.layers[0].linear2.out_features) + self.return_intermediate = return_intermediate + self._reset_parameters(weight_init_name) + + def _reset_parameters(self, weight_init_name): + func = WEIGHT_INIT_DICT[weight_init_name] + for p in self.parameters(): + if p.dim() > 1: + func(p) + + def forward( + self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + transpose_swap: Optional[bool] = False, + return_attn_weights: Optional[bool] = False, + ): + if transpose_swap: + bs, c, h, w = memory.shape + memory = memory.flatten(2).permute( + 2, 0, 1) # memory: bs, c, t -> t, b, c + if pos is not None: + pos = pos.flatten(2).permute(2, 0, 1) + output = tgt + + intermediate = [] + attns = [] + + for layer in self.layers: + output, attn = layer( + output, + memory, + tgt_mask=tgt_mask, + memory_mask=memory_mask, + tgt_key_padding_mask=tgt_key_padding_mask, + 
memory_key_padding_mask=memory_key_padding_mask, + pos=pos, + query_pos=query_pos, + return_attn_weights=return_attn_weights) + if self.return_intermediate: + intermediate.append(self.norm(output)) + if return_attn_weights: + attns.append(attn) + + if self.norm is not None: + output = self.norm(output) + if self.return_intermediate: + intermediate.pop() + intermediate.append(output) + + if return_attn_weights: + attns = torch.stack(attns) + + if self.return_intermediate: + return torch.stack(intermediate), attns + + return output, attns + + +class MaskedTransformerEncoder(TransformerEncoder): + + def __init__(self, + encoder_layer, + num_layers, + masking_radius, + interim_downsampling, + norm=None, + weight_init_name='xavier_uniform'): + super().__init__(encoder_layer, + num_layers, + norm=norm, + weight_init_name=weight_init_name) + assert len(masking_radius) == num_layers + self.masking_radius = masking_radius + self.interim_downsampling = interim_downsampling + + def compute_mask(self, xyz, radius, dist=None): + with torch.no_grad(): + if dist is None or dist.shape[1] != xyz.shape[1]: + dist = torch.cdist(xyz, xyz, p=2) + # entries that are True in the mask do not contribute to self-attention + # so points outside the radius are not considered + mask = dist >= radius + return mask, dist + + def forward( + self, + src, + mask: Optional[Tensor] = None, + src_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + xyz: Optional[Tensor] = None, + transpose_swap: Optional[bool] = False, + ): + + if transpose_swap: + bs, c, h, w = src.shape + src = src.flatten(2).permute(2, 0, 1) + if pos is not None: + pos = pos.flatten(2).permute(2, 0, 1) + + output = src + xyz_dist = None + xyz_inds = None + + for idx, layer in enumerate(self.layers): + mask = None + if self.masking_radius[idx] > 0: + mask, xyz_dist = self.compute_mask(xyz, + self.masking_radius[idx], + xyz_dist) + # mask must be tiled to num_heads of the transformer + bsz, n, n = mask.shape + nhead = layer.nhead + mask = mask.unsqueeze(1) + mask = mask.repeat(1, nhead, 1, 1) + mask = mask.view(bsz * nhead, n, n) + + output = layer(output, + src_mask=mask, + src_key_padding_mask=src_key_padding_mask, + pos=pos) + + if idx == 0 and self.interim_downsampling: + # output is npoints x batch x channel. 
make batch x channel x npoints
+                output = output.permute(1, 2, 0)
+                xyz, output, xyz_inds = self.interim_downsampling(xyz, output)
+                # swap back
+                output = output.permute(2, 0, 1)
+
+        if self.norm is not None:
+            output = self.norm(output)
+
+        if transpose_swap:
+            output = output.permute(1, 2, 0).view(bs, c, h, w).contiguous()
+
+        return xyz, output, xyz_inds
+
+    def extra_repr(self):
+        radius_str = ', '.join(['%.2f' % (x) for x in self.masking_radius])
+        return f'masking_radius={radius_str}'
+
+
+class TransformerEncoderLayer(nn.Module):
+
+    def __init__(self,
+                 d_model,
+                 nhead=4,
+                 dim_feedforward=128,
+                 dropout=0.1,
+                 dropout_attn=None,
+                 activation='relu',
+                 normalize_before=True,
+                 norm_name='ln',
+                 use_ffn=True,
+                 ffn_use_bias=True):
+        super().__init__()
+        if dropout_attn is None:
+            dropout_attn = dropout
+        self.self_attn = nn.MultiheadAttention(d_model,
+                                               nhead,
+                                               dropout=dropout_attn)
+        self.use_ffn = use_ffn
+        if self.use_ffn:
+            # Implementation of Feedforward model
+            self.linear1 = nn.Linear(d_model,
+                                     dim_feedforward,
+                                     bias=ffn_use_bias)
+            self.dropout = nn.Dropout(dropout, inplace=False)
+            self.linear2 = nn.Linear(dim_feedforward,
+                                     d_model,
+                                     bias=ffn_use_bias)
+            self.norm2 = NORM_DICT[norm_name](d_model)
+            self.dropout2 = nn.Dropout(dropout, inplace=False)
+
+        self.norm1 = NORM_DICT[norm_name](d_model)
+        self.dropout1 = nn.Dropout(dropout, inplace=False)
+
+        self.activation = ACTIVATION_DICT[activation]()
+        self.normalize_before = normalize_before
+        self.nhead = nhead
+
+    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
+        return tensor if pos is None else tensor + pos
+
+    def forward_post(self,
+                     src,
+                     src_mask: Optional[Tensor] = None,
+                     src_key_padding_mask: Optional[Tensor] = None,
+                     pos: Optional[Tensor] = None):
+        q = k = self.with_pos_embed(src, pos)
+        value = src
+        src2 = self.self_attn(q,
+                              k,
+                              value=value,
+                              attn_mask=src_mask,
+                              key_padding_mask=src_key_padding_mask)[0]
+        src = src + self.dropout1(src2)
+        # post-norm: normalize after the attention residual connection
+        src = self.norm1(src)
+        if self.use_ffn:
+            src2 = self.linear2(
+                self.dropout(self.activation(self.linear1(src))))
+            src = src + self.dropout2(src2)
+            src = self.norm2(src)
+        return src
+
+    def forward_pre(self,
+                    src,
+                    src_mask: Optional[Tensor] = None,
+                    src_key_padding_mask: Optional[Tensor] = None,
+                    pos: Optional[Tensor] = None,
+                    return_attn_weights: Optional[bool] = False):
+
+        src2 = self.norm1(src)
+        value = src2
+        q = k = self.with_pos_embed(src2, pos)
+        src2, attn_weights = self.self_attn(
+            q,
+            k,
+            value=value,
+            attn_mask=src_mask,
+            key_padding_mask=src_key_padding_mask)
+        src = src + self.dropout1(src2)
+        if self.use_ffn:
+            src2 = self.norm2(src)
+            src2 = self.linear2(
+                self.dropout(self.activation(self.linear1(src2))))
+            src = src + self.dropout2(src2)
+        if return_attn_weights:
+            return src, attn_weights
+        return src
+
+    def forward(self,
+                src,
+                src_mask: Optional[Tensor] = None,
+                src_key_padding_mask: Optional[Tensor] = None,
+                pos: Optional[Tensor] = None,
+                return_attn_weights: Optional[bool] = False):
+        if self.normalize_before:
+            return self.forward_pre(src, src_mask, src_key_padding_mask, pos,
+                                    return_attn_weights)
+        return self.forward_post(src, src_mask, src_key_padding_mask, pos)
+
+    def extra_repr(self):
+        st = ''
+        if hasattr(self.self_attn, 'dropout'):
+            st += f'attn_dr={self.self_attn.dropout}'
+        return st
+
+
+class TransformerDecoderLayer(nn.Module):
+
+    def __init__(self,
+                 d_model,
+                 nhead=4,
+                 dim_feedforward=256,
+                 dropout=0.1,
dropout_attn=None, + activation='relu', + normalize_before=True, + norm_fn_name='ln'): + super().__init__() + if dropout_attn is None: + dropout_attn = dropout + self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout) + self.multihead_attn = nn.MultiheadAttention(d_model, + nhead, + dropout=dropout) + + self.norm1 = NORM_DICT[norm_fn_name](d_model) + self.norm2 = NORM_DICT[norm_fn_name](d_model) + + self.norm3 = NORM_DICT[norm_fn_name](d_model) + self.dropout1 = nn.Dropout(dropout, inplace=False) + self.dropout2 = nn.Dropout(dropout, inplace=False) + self.dropout3 = nn.Dropout(dropout, inplace=False) + + # Implementation of Feedforward model + self.linear1 = nn.Linear(d_model, dim_feedforward) + self.dropout = nn.Dropout(dropout, inplace=False) + self.linear2 = nn.Linear(dim_feedforward, d_model) + + self.activation = ACTIVATION_DICT[activation]() + self.normalize_before = normalize_before + + def with_pos_embed(self, tensor, pos: Optional[Tensor]): + return tensor if pos is None else tensor + pos + + def forward_post(self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + return_attn_weights: Optional[bool] = False): + q = k = self.with_pos_embed(tgt, query_pos) + tgt2 = self.self_attn(q, + k, + value=tgt, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt = self.norm1(tgt) + tgt2, attn = self.multihead_attn( + query=self.with_pos_embed(tgt, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask) + tgt = tgt + self.dropout2(tgt2) + tgt = self.norm2(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt)))) + tgt = tgt + self.dropout3(tgt2) + tgt = self.norm3(tgt) + if return_attn_weights: + return tgt, attn + return tgt, None + + def forward_pre(self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + return_attn_weights: Optional[bool] = False): + tgt2 = self.norm1(tgt) + q = k = self.with_pos_embed(tgt2, query_pos) + tgt2 = self.self_attn(q, + k, + value=tgt2, + attn_mask=tgt_mask, + key_padding_mask=tgt_key_padding_mask)[0] + tgt = tgt + self.dropout1(tgt2) + tgt2 = self.norm2(tgt) + tgt2, attn = self.multihead_attn( + query=self.with_pos_embed(tgt2, query_pos), + key=self.with_pos_embed(memory, pos), + value=memory, + attn_mask=memory_mask, + key_padding_mask=memory_key_padding_mask) + tgt = tgt + self.dropout2(tgt2) + tgt2 = self.norm3(tgt) + tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2)))) + tgt = tgt + self.dropout3(tgt2) + if return_attn_weights: + return tgt, attn + return tgt, None + + def forward(self, + tgt, + memory, + tgt_mask: Optional[Tensor] = None, + memory_mask: Optional[Tensor] = None, + tgt_key_padding_mask: Optional[Tensor] = None, + memory_key_padding_mask: Optional[Tensor] = None, + pos: Optional[Tensor] = None, + query_pos: Optional[Tensor] = None, + return_attn_weights: Optional[bool] = False): + if self.normalize_before: + return self.forward_pre(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, + memory_key_padding_mask, pos, query_pos, + 
return_attn_weights) + return self.forward_post(tgt, memory, tgt_mask, memory_mask, + tgt_key_padding_mask, memory_key_padding_mask, + pos, query_pos, return_attn_weights) diff --git a/models/LL3DA/models/detector_Vote2Cap_DETR/vote_query.py b/models/LL3DA/models/detector_Vote2Cap_DETR/vote_query.py new file mode 100644 index 0000000..28d4fa4 --- /dev/null +++ b/models/LL3DA/models/detector_Vote2Cap_DETR/vote_query.py @@ -0,0 +1,63 @@ +import third_party.pointnet2.pointnet2_utils as pointnet2_utils +import torch +import torch.nn as nn +import torch.nn.functional as F +from third_party.pointnet2.pointnet2_modules import PointnetSAModuleVotes + + +class VoteQuery(nn.Module): + + def __init__(self, d_model, nqueries): + + super().__init__() + self.nqueries = nqueries + + self.FFN_vote = nn.Sequential( + nn.Conv1d(d_model, d_model, 1), + nn.BatchNorm1d(d_model), + nn.ReLU(), + nn.Conv1d(d_model, d_model, 1), + nn.BatchNorm1d(d_model), + nn.ReLU(), + nn.Conv1d(d_model, 3 + d_model, 1), + ) + + self.set_abstraction = PointnetSAModuleVotes( + npoint=nqueries, + radius=0.3, + nsample=16, + mlp=[d_model, d_model, d_model, d_model], + use_xyz=True, + normalize_xyz=True) + + def forward(self, encode_xyz, encode_features): + """Forward pass. + + Arguments: + seed_xyz: (batch_size, num_seed, 3) Pytorch tensor + seed_features: (batch_size, feature_dim, num_seed) Pytorch tensor + Returns: + vote_xyz: (batch_size, num_seed*vote_factor, 3) + vote_features: (batch_size, vote_feature_dim, num_seed*vote_factor) + """ + + # batch, channel, npoints + out = self.FFN_vote(encode_features) + vote_xyz = encode_xyz + out[:, :3, :].permute(0, 2, 1) + encode_features = encode_features + out[:, 3:, :] + + features_norm = torch.norm(encode_features, p=2, dim=1) + encode_features = encode_features.div(features_norm.unsqueeze(1)) + + sample_inds = pointnet2_utils.furthest_point_sample( + encode_xyz, self.nqueries) + + query_xyz, query_features, _ = self.set_abstraction( + vote_xyz, encode_features, sample_inds) + + return { + 'vote_xyz': vote_xyz, # batch x npenc x 3 + 'seed_xyz': encode_xyz, # batch x npenc x 3 + 'query_xyz': query_xyz, # batch x npenc x 3 + 'query_features': query_features # batch x channel x npquery + } diff --git a/models/LL3DA/models/ll3da/captioner.py b/models/LL3DA/models/ll3da/captioner.py new file mode 100644 index 0000000..db1e068 --- /dev/null +++ b/models/LL3DA/models/ll3da/captioner.py @@ -0,0 +1,549 @@ +import copy +import importlib +import math +from collections import OrderedDict +from typing import Dict + +import torch +import torch.nn.functional as nnf +from models.ll3da.generation_utils import generation +from models.ll3da.position_embedding import PositionEmbeddingCoordsSine +from torch import Tensor, nn +from transformers import (AutoModelForCausalLM, AutoTokenizer, + InstructBlipQFormerConfig, InstructBlipQFormerModel) +from utils.box_util import box3d_iou_batch_tensor + + +def proposal_dimension_select(features: Tensor, indices: Tensor) -> Tensor: + ''' + + Parameters + ---------- + features : Tensor, with size [batch x nsrc x ...] + Data bank, from which to gather information. + indices : Tensor, with size [batch x ntgt] + Indices for gathering information from data bank. + + Returns + ------- + Tensor, with size [batch x ntgt x ...] + Gathers features in proposal dimension. 
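+    Example (hypothetical sizes): features [2 x 16 x 256] with indices
+    [2 x 4] yields a [2 x 4 x 256] tensor; the indices are broadcast over
+    the trailing feature dimensions before the torch.gather call.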
+ + ''' + return torch.gather( + features, 1, + indices.reshape(*(indices.shape + + tuple(1 for _ in features.shape[2:]))).repeat( + *((1, 1) + features.shape[2:]))) + + +def select_proposal_feature(prop_features: Tensor, prop_box_corners: Tensor, + prop_sem_mask: Tensor, + box_query: Tensor) -> Tensor: + ''' + + Parameters + ---------- + prop_features : Tensor, with size [batch x nproposal x n_embd] + prop_box_corners : Tensor, with size [batch x nproposal x 8 x 3] + prop_sem_mask : Tensor, with size [batch x nproposal], 0 for background + box_query : Tensor, with size [batch x nquery x 8 x 3] + + Returns + ------- + Tensor, with size [batch x nquery x n_embd] + Gathers features in proposal dimension. + + ''' + # Here is a box-matching, match the input box prompt with the detector output and get the feature of the matched proposal. + + # prop_features + batch_size, nproposal, _, _ = prop_box_corners.shape + nquery = box_query.shape[1] + + # generate a matching matrix + matched_box_iou = box3d_iou_batch_tensor( + prop_box_corners.unsqueeze(1).repeat(1, nquery, 1, 1, + 1).reshape(-1, 8, 3), + box_query.unsqueeze(2).repeat(1, 1, nproposal, 1, 1).reshape(-1, 8, 3)) + matched_box_iou = matched_box_iou.reshape(batch_size, nquery, nproposal) + matched_box_iou = matched_box_iou * prop_sem_mask.unsqueeze(1) + + matched_indices = matched_box_iou.argmax(-1) # batch x nquery + return proposal_dimension_select(prop_features, matched_indices) + + +class PromptEncoder(nn.Module): + + def __init__(self, encoder_hidden_size, visual_nquery, qformer_hidden_size, + n_embd): + super(PromptEncoder, self).__init__() + self.n_embd = n_embd + self.visual_nquery = visual_nquery + self.qformer_hidden_size = qformer_hidden_size + self.encoder_hidden_size = encoder_hidden_size + + self.box_prompt_projector = nn.Sequential( + nn.Linear(encoder_hidden_size, qformer_hidden_size), + nn.ReLU(), + nn.Linear(qformer_hidden_size, + visual_nquery * qformer_hidden_size), + ) + self.click_prompt_projector = nn.Sequential( + nn.Linear(encoder_hidden_size, qformer_hidden_size), + nn.ReLU(), + nn.Linear(qformer_hidden_size, + visual_nquery * qformer_hidden_size), + ) + self.pos_emb3d = PositionEmbeddingCoordsSine(d_pos=encoder_hidden_size, + pos_type='fourier', + normalize=True) + + def expand_prompt_representation(self, + prompt_feature: Tensor, + prompt_mask: Tensor = None): + # input: + # prompt_feature: batch x nprompt x (ntkn x channel) + # prompt_mask: batch x nprompt + # output: + # prompt_feature: batch x (nprompt x ntkn) x channel + # prompt_mask: batch x (nprompt x ntkn) + batch_size, nprompt = prompt_feature.shape[:2] + if prompt_mask is None: + prompt_mask = torch.ones_like(prompt_feature[..., 0]) + prompt_mask = prompt_mask.unsqueeze(-1).repeat(1, 1, + self.visual_nquery) + prompt_mask = prompt_mask.reshape(batch_size, + nprompt * self.visual_nquery) + prompt_feature = prompt_feature.reshape(batch_size, nprompt, + self.visual_nquery, + self.qformer_hidden_size) + prompt_feature = prompt_feature.reshape(batch_size, + nprompt * self.visual_nquery, + self.qformer_hidden_size) + return prompt_feature, prompt_mask + + def forward(self, + detector_output, + point_cloud_dims, + box_query=None, + box_qmask=None, + click_query=None, + click_qmask=None): + sem_cls_logits = detector_output['sem_cls_logits'] + prop_sem_mask = (sem_cls_logits.argmax(-1) != + (sem_cls_logits.shape[-1] - 1)).float() + + net_device = sem_cls_logits.device + batch_size = sem_cls_logits.shape[0] + + ### prompt encoding + # box prompt encoding + 
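+        # The lists start with zero-width placeholders so the torch.cat at
+        # the end stays well-defined even when neither a box nor a click
+        # prompt is given; each provided prompt then appends
+        # visual_nquery (8) tokens per query after
+        # expand_prompt_representation.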
visual_prompt = [ + torch.zeros(batch_size, 0, self.qformer_hidden_size).to(net_device) + ] + visual_mask = [torch.zeros(batch_size, 0).to(net_device)] + if box_query is not None: + box_prompt = select_proposal_feature( + detector_output['prop_features'][-1], + detector_output['box_corners'], prop_sem_mask, box_query) + box_prompt = self.box_prompt_projector(box_prompt) + box_prompt, box_qmask = self.expand_prompt_representation( + box_prompt, box_qmask) + visual_prompt.append(box_prompt) + visual_mask.append(box_qmask) + + # click prompt encoding: batch x nquery x nproposal + if click_query is not None: + click_xyz = click_query # batch x nquery x 3 + click_prompt = self.pos_emb3d(click_xyz, + input_range=point_cloud_dims) + click_prompt = self.click_prompt_projector( + click_prompt.permute(0, 2, 1)) + click_prompt, click_qmask = self.expand_prompt_representation( + click_prompt, click_qmask) + visual_prompt.append(click_prompt) + visual_mask.append(click_qmask) + + ## concat box and click prompts as well as prompt masks + prompt_feature = torch.cat(visual_prompt, + dim=1) # batch x (2 x ntoken) x channel + prompt_mask = torch.cat(visual_mask, dim=1) # batch x (2 x ntoken) + + return prompt_feature, prompt_mask + + +class captioner(nn.Module): + + def train(self, mode: bool = True): + super().train(mode) + if self.freeze_llm is True: + self.transformer.eval() + for param in self.transformer.parameters(): + param.requires_grad = False + return self + + def __init__(self, args, train_dataset): + super(captioner, self).__init__() + + self.encoder_hidden_size = 256 + self.dtype = torch.float16 + self.visual_nquery = 8 + self.nlatent_query = 32 + self.freeze_llm = args.freeze_llm + + ## initialize tokenizer for batch decoding + self.tokenizer = AutoTokenizer.from_pretrained(args.vocab) + self.nvocabs = len(self.tokenizer) + + ## caption generation cores + self.transformer = AutoModelForCausalLM.from_pretrained( + args.vocab, torch_dtype=self.dtype) + self.n_embd = self.transformer.config.hidden_size + + ## Multi-modality Transformer + qformer_config = InstructBlipQFormerConfig( + num_hidden_layers=6, encoder_hidden_size=self.encoder_hidden_size) + self.qformer = InstructBlipQFormerModel.from_pretrained( + args.qformer_vocab, config=qformer_config) + self.qformer_hidden_size = qformer_config.hidden_size + + ## for prompt feature projection + self.encoder_to_qformer_projection = nn.Sequential( + nn.Linear(self.encoder_hidden_size, + qformer_config.encoder_hidden_size), + nn.ReLU(), + nn.Linear(qformer_config.encoder_hidden_size, + qformer_config.encoder_hidden_size), + nn.ReLU(), + ) + self.prompt_encoder = PromptEncoder(self.encoder_hidden_size, + self.visual_nquery, + self.qformer_hidden_size, + self.n_embd) + self.latent_query = nn.Embedding(self.nlatent_query, + self.qformer_hidden_size) + self.qformer_to_language_projection = nn.Linear( + self.qformer_hidden_size, self.n_embd) + + self.max_gen_per_iter = 8 + ## ---- super parameters for evaluation + self.caption_config = { + 'early_stopping': True, + 'eos_token_id': self.tokenizer.eos_token_id, + 'num_beams': 4 if args.use_beam_search is True else None, + 'repetition_penalty': 2.5 + } + self.train() + + def _get_instruction_response(self, + detector_output: dict, + inputs: dict, + box_query: Tensor = None, + box_qmask: Tensor = None, + click_query: Tensor = None, + click_qmask: Tensor = None) -> dict: + + point_cloud_dims = [ + inputs['point_cloud_dims_min'], + inputs['point_cloud_dims_max'], + ] + net_device = inputs['point_clouds'].device 
+ batch_size = inputs['point_clouds'].shape[0] + + # here we input the feature of the whole scene + encoder_hidden_states = detector_output['enc_features'] + + ## prompt encoding + + prompt_feature, prompt_mask = self.prompt_encoder( + detector_output, + point_cloud_dims, + box_query=box_query, + box_qmask=box_qmask, + click_query=click_query, + click_qmask=click_qmask) + + ## gather query feature for qformer: batch x (n_query + n_tokens) x n_embd + query_tokens = self.latent_query.weight.unsqueeze(0).repeat( + batch_size, 1, 1) + query_tokens = torch.cat((query_tokens, prompt_feature), dim=1) + query_attention_mask = torch.cat((torch.ones( + batch_size, self.nlatent_query).to(net_device), prompt_mask), + dim=1) + + # prepare qformer inputs: batch x ntoken x n_embd + query_attention_mask = torch.cat( + (query_attention_mask, inputs['qformer_attention_mask']), dim=1) + + query_outputs = self.qformer( + input_ids=inputs['qformer_input_ids'], + attention_mask=query_attention_mask, + query_embeds=query_tokens, + encoder_hidden_states=self.encoder_to_qformer_projection( + encoder_hidden_states), + ) + query_outputs = query_outputs[0][:, :self.nlatent_query, :] + prefix_feature = self.qformer_to_language_projection(query_outputs) + + return prefix_feature + + def forward(self, + detector_output: dict, + inputs: dict, + is_eval: bool = False, + task_name: str = 'qa') -> dict: + + if is_eval is False: + return self.forward_training(detector_output, inputs) + + response_config = { + 'ov-det': 64, + 'dense-cap': 48, + 'qa': 256, + 'chat': 512, + } + max_gen_length = response_config[task_name] + + if task_name in {'ov-det', 'dense-cap'}: + return self.predict_densecap(detector_output, + inputs, + task_name, + max_gen_length=max_gen_length) + elif task_name == 'qa': + return self.predict_answer(detector_output, + inputs, + max_gen_length=max_gen_length) + else: + return self.predict_chat(detector_output, + inputs, + max_gen_length=max_gen_length) + + def forward_training(self, detector_output: Dict, inputs: Dict) -> Dict: + # get word embeddings, NOTE: captioner does not predict token + input_ids = inputs['input_ids'] # batch x ntokens + input_mask = inputs['attention_mask'] # batch x ntokens + gradient_mask = inputs['gradient_mask'] # batch x ntokens + + box_query = inputs.get('box_query', None) # batch x nquery x 8 x 3 + box_qmask = inputs.get('box_mask', None) # batch x nquery + click_query = inputs.get('click_query', None) # batch x nquery x 3 + click_qmask = inputs.get('click_mask', None) # batch x nquery + + embedding_layer = self.transformer.get_input_embeddings() + + # ---- batch x ntoken x n_embd + prefix_tokens = self._get_instruction_response( + detector_output=detector_output, + inputs=inputs, + box_query=box_query, + box_qmask=box_qmask, + click_query=click_query, + click_qmask=click_qmask) + prefix_mask = torch.ones_like(prefix_tokens[..., 0]) + # ---- batch x (ntoken + nword) x n_embd + inputs_embeds = torch.cat((prefix_tokens, embedding_layer(input_ids)), + dim=1) + attention_mask = torch.cat((prefix_mask, input_mask), dim=1) + + # ---- calculate transformer loss + outputs = self.transformer( + inputs_embeds=inputs_embeds.to(self.dtype), + attention_mask=attention_mask.to(self.dtype), + ) + + detector_output['loss'] += self.loss_caption( + logits=outputs.logits[:, prefix_tokens.shape[1] - 1:-1], + target=input_ids, + mask=gradient_mask.to(self.dtype), + ) + return detector_output + + def loss_caption(self, logits: Tensor, target: Tensor, + mask: Tensor) -> Tensor: + loss_per_word = 
nnf.cross_entropy( + logits.permute(0, 2, 1).contiguous(), + target, + reduction='none', + ) + final_loss = torch.sum(loss_per_word * mask) / torch.sum(mask + 1e-6) + # parameter activation for multi-gpu training + for param in self.parameters(): + if param.requires_grad: + final_loss += 0 * torch.sum(param.to(final_loss.dtype)**2) + return final_loss + + def predict_densecap(self, + detector_output: Dict, + inputs: Dict, + task_name: str, + max_gen_length: int = 64) -> Dict: + + # ---- necessary elements + embedding_layer = self.transformer.get_input_embeddings() + net_device = next(self.parameters()).device + batch_size, nproposals, _, _ = detector_output['box_corners'].shape + # ---- to store llm outputs + output_ids = torch.ones(batch_size, nproposals, + max_gen_length).long().to(net_device) + output_ids = output_ids * self.tokenizer.eos_token_id + + # ---- llm input preparation + instruction = inputs['instruction'][0] # ntoken + instruction_mask = inputs['instruction_mask'][0] # ntoken + instruction_id = instruction[instruction_mask == 1] # ntoken + instruction_id = instruction_id[None, :].repeat(batch_size, 1) + instruction_embedding = embedding_layer( + instruction_id) # batch x ntoken x n_embd + + prefix_tokens = [] + for proposal_id in range(nproposals): + box_query = detector_output['box_corners'][:, + [proposal_id + ]] # batch x 1 x 8 x 3 + + click_query = None + if task_name == 'ov-det': + click_query = detector_output['query_xyz'][:, + [proposal_id + ]] # batch x 1 x 3 + + instruct_prefix_feature = self._get_instruction_response( # batch x ntoken x n_embd + detector_output=detector_output, + inputs=inputs, + box_query=box_query, # batch x 1 x 8 x 3 + click_query=click_query, + ) + instruct_prefix_feature = torch.cat( + (instruct_prefix_feature, instruction_embedding), dim=1) + prefix_tokens.append(instruct_prefix_feature.unsqueeze(1)) + # batch x nproposal x 1 x n_embd + prefix_tokens = torch.cat(prefix_tokens, dim=1).to(self.dtype) + + ## filter and rank the queries + sem_cls_logits = detector_output['sem_cls_logits'] + objectness_mask = sem_cls_logits.argmax(-1) != ( + sem_cls_logits.shape[-1] - 1) + + ## limit the proposals for generating captions + candidate_prefix = prefix_tokens[objectness_mask].to(self.dtype) + + gather_output_ids = [] + for start_idx in range(0, candidate_prefix.shape[0], + self.max_gen_per_iter): + prefix = candidate_prefix[start_idx:start_idx + + self.max_gen_per_iter] + scene_cap_output = generation(self.transformer, + inputs_embeds=prefix, + max_length=max_gen_length, + **self.caption_config) + gather_output_ids.append(scene_cap_output['output_ids']) + gather_output_ids = torch.cat(gather_output_ids, dim=0) + + output_ids[objectness_mask] = gather_output_ids + detector_output['output_ids'] = output_ids + + return detector_output + + def predict_answer(self, + detector_output: Dict, + inputs: Dict, + max_gen_length: int = 8) -> Dict: + + # ---- necessary elements + embedding_layer = self.transformer.get_input_embeddings() + net_device = next(self.parameters()).device + # ---- to store llm outputs + output_ids = [] + + # ---- llm input preparation + instruction = inputs['instruction'] # ntoken + instruction_mask = inputs['instruction_mask'] # ntoken + + prefix_tokens = self._get_instruction_response( + detector_output=detector_output, + inputs=inputs, + ) + prefix_tokens = prefix_tokens.to(self.dtype) + + for batch_id in range(prefix_tokens.shape[0]): + sample_instruction = instruction[batch_id] + sample_mask = instruction_mask[batch_id] # ntoken + + 
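+            # Decode one sample at a time: the Q-Former prefix is
+            # concatenated with the embedded instruction tokens and handed
+            # to the custom generation() helper, which dispatches to greedy
+            # or beam search depending on caption_config['num_beams'].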
+            output = generation(
+                self.transformer,
+                inputs_embeds=torch.cat(
+                    [
+                        prefix_tokens[batch_id].unsqueeze(
+                            0),  # 1 x nprefix x n_embd
+                        embedding_layer(
+                            sample_instruction[sample_mask == 1]).unsqueeze(0)
+                    ],
+                    dim=1),
+                max_new_tokens=max_gen_length,
+                do_sample=True,
+                top_k=50,
+                top_p=0.95,
+                no_repeat_ngram_size=4,
+                num_return_sequences=1,
+                **self.caption_config)
+            output_ids.append(output['output_ids'])
+
+        output_ids = torch.cat(output_ids, dim=0)
+        detector_output['output_ids'] = output_ids
+
+        return detector_output
+
+    def predict_chat(self,
+                     detector_output: Dict,
+                     inputs: Dict,
+                     max_gen_length: int = 512) -> Dict:
+
+        # ---- necessary elements
+        embedding_layer = self.transformer.get_input_embeddings()
+        net_device = next(self.parameters()).device
+        # ---- to store llm outputs
+        output_ids = []
+
+        # ---- llm input preparation
+        instruction = inputs['instruction']  # ntoken
+        instruction_mask = inputs['instruction_mask']  # ntoken
+
+        prefix_tokens = self._get_instruction_response(
+            detector_output=detector_output,
+            inputs=inputs,
+        )
+        prefix_tokens = prefix_tokens.to(self.dtype)
+
+        for batch_id in range(prefix_tokens.shape[0]):
+            sample_instruction = instruction[batch_id]
+            sample_mask = instruction_mask[batch_id]  # ntoken
+
+            output = self.transformer.generate(
+                inputs_embeds=torch.cat(
+                    [
+                        prefix_tokens[batch_id].unsqueeze(
+                            0),  # 1 x nprefix x n_embd
+                        embedding_layer(
+                            sample_instruction[sample_mask == 1]).unsqueeze(0)
+                    ],
+                    dim=1),
+                max_new_tokens=max_gen_length,
+                do_sample=True,
+                top_k=50,
+                top_p=0.95,
+                no_repeat_ngram_size=4,
+                num_return_sequences=1,
+            )  # 1 x max_gen_length
+            output = output.squeeze(0)
+            placeholder = torch.ones(max_gen_length).to(
+                net_device) * self.tokenizer.eos_token_id
+            output = output[:min(max_gen_length, output.shape[0])]
+            placeholder[:output.shape[0]] = output
+
+            output_ids.append(placeholder.unsqueeze(0).long())
+
+        output_ids = torch.cat(output_ids, dim=0)
+        detector_output['output_ids'] = output_ids
+
+        return detector_output
diff --git a/models/LL3DA/models/ll3da/generation_utils.py b/models/LL3DA/models/ll3da/generation_utils.py
new file mode 100644
index 0000000..4fbceae
--- /dev/null
+++ b/models/LL3DA/models/ll3da/generation_utils.py
@@ -0,0 +1,215 @@
+import heapq
+import time
+from collections import OrderedDict
+from typing import Callable
+
+import torch
+from torch import Tensor
+
+
+def greedy_decode(transformer: Callable, **kwargs) -> Tensor:
+
+    ## prepare inputs
+    max_length = kwargs['max_length']
+    inputs_embeds = kwargs['inputs_embeds']  # batch x nwords x channel
+
+    batch, _, channel = inputs_embeds.shape
+
+    ## prepare storage
+    output_ids = torch.ones(batch, max_length).long().to(inputs_embeds.device)
+    output_ids = output_ids * kwargs['eos_token_id']
+
+    ## prepare temporal storage of inputs
+    temporal_inputs = inputs_embeds
+    finished_batches = torch.zeros(batch).bool().to(inputs_embeds.device)
+    embedding_layer = transformer.get_input_embeddings()
+    for word_id in range(max_length):
+
+        step_output = transformer(inputs_embeds=temporal_inputs, )
+
+        ## greedy decoding: pick the most probable next word
+        next_word_id = step_output.logits[:, -1, :].argmax(-1)
+
+        # check those finished sentences and overwrite
+        finished_batches |= (next_word_id == kwargs['eos_token_id'])
+        next_word_id[finished_batches] = kwargs['eos_token_id']
+
+        output_ids[:, word_id] = next_word_id.long()  # (batch, )
+
+        temporal_inputs = torch.cat(
+            (inputs_embeds, embedding_layer(output_ids[:, :word_id + 1])),
+            dim=1)
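+        # NOTE: the full prefix is re-embedded and re-fed at every step
+        # (no key/value cache), so the per-sequence decoding cost grows
+        # quadratically with the generated length.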
+
+    return OrderedDict({'output_ids': output_ids.long()})
+
+
+def beam_search_decode(transformer: Callable, **kwargs) -> Tensor:
+    ## prepare inputs
+    max_length = kwargs['max_length']
+    inputs_embeds = kwargs['inputs_embeds']  # batch x nwords x channel
+
+    # sanity check
+    assert kwargs['num_beams'] is not None, (
+        'num_beams must be provided when calling beam search!')
+    nbeams = kwargs['num_beams']
+
+    batch, prefix_length, channel = inputs_embeds.shape
+    # batch x nbeams x length x channel
+    expanded_inputs_embeds = inputs_embeds.unsqueeze(1).repeat(1, nbeams, 1, 1)
+
+    ## prepare storage
+    output_scores = torch.zeros(batch, nbeams).to(inputs_embeds.device)
+    output_ids = torch.ones(batch, nbeams, max_length).to(inputs_embeds.device)
+    output_ids = output_ids * kwargs['eos_token_id']
+    batch_beam_results = OrderedDict({
+        batch_id: [
+            [float('-inf'), (float('-inf'), float('-inf')), None, None] \
+                for b in range(nbeams)] \
+        for batch_id in range(batch)
+    })
+    embedding_layer = transformer.get_input_embeddings()
+
+    for word_id in range(max_length):
+
+        if word_id == 0:  # cold start for the first generation step
+
+            step_output = transformer(inputs_embeds=inputs_embeds, )
+            # topk inds
+            topk_scores, topk_inds = step_output.logits[:, -1, :].topk(
+                k=nbeams, largest=True, dim=-1)  # batch x nbeams
+
+            # store temporal scores for each beam
+            output_ids[..., word_id] = topk_inds
+            output_scores += torch.log_softmax(topk_scores, dim=-1)
+
+        else:  # warm start from the previous step
+
+            # batch x nbeams x word_id
+            generated_words = output_ids[..., :word_id]
+
+            # batch x nbeams x (length + word_id) x channel
+            temporal_inputs = torch.cat(
+                (expanded_inputs_embeds, embedding_layer(
+                    generated_words.long())),
+                dim=2)
+
+            step_output = transformer(inputs_embeds=temporal_inputs.reshape(
+                batch * nbeams, prefix_length + word_id, channel), )
+            last_word_logits = step_output.logits[:, -1, :].reshape(
+                batch, nbeams, -1)  # batch x nbeams x nvocabs
+
+            # beam_scores: batch x nbeams x nvocabs
+            if word_id != max_length - 1:
+                beam_scores = output_scores.unsqueeze(-1) + torch.log_softmax(
+                    last_word_logits, dim=-1)
+
+                output_scores, select_inds = beam_scores.reshape(
+                    batch, -1).topk(k=nbeams, largest=True, dim=-1)
+                # batch x k
+                select_beam_id = select_inds // last_word_logits.shape[-1]
+                select_word_id = select_inds % last_word_logits.shape[-1]
+
+            else:
+
+                # force ends of certain captions
+                last_word_probs = torch.log_softmax(last_word_logits, dim=-1)
+                output_scores += last_word_probs[..., kwargs['eos_token_id']]
+                select_beam_id = \
+                    torch.arange(nbeams).to(output_ids.device).unsqueeze(0).repeat(batch, 1)
+                select_word_id = \
+                    torch.ones_like(output_ids[..., -1]) * kwargs['eos_token_id']
+
+            # gather generated beams
+            output_ids = torch.gather(
+                output_ids, 1,
+                select_beam_id.unsqueeze(-1).repeat(1, 1, max_length))
+            output_ids[..., word_id] = select_word_id
+
+            ## ---- process the finished beams: batch x nbeams
+            sentence_log_prob = output_scores / (word_id + 1)
+
+            finished_batch, finished_beams = torch.where(
+                select_word_id == kwargs['eos_token_id'])
+            for batch_id, beam_id in zip(finished_batch.cpu().tolist(),
+                                         finished_beams.cpu().tolist()):
+                sentence = [
+                    sentence_log_prob[batch_id, beam_id].cpu().tolist(),
+                    (word_id, beam_id),
+                    output_ids[batch_id, beam_id],  # max_length
+                    sentence_log_prob[batch_id, [beam_id]]  # 1
+                ]
+                heapq.heappushpop(batch_beam_results[batch_id], sentence)
+
+            # neglect the finished beam
+            output_scores[select_word_id ==
+                          kwargs['eos_token_id']] = -float('inf')
+
+    ## final call, gather beam results from heaps
+    output_ids = torch.cat([
+        torch.cat(
+            [
+                beam_sentence.unsqueeze(0) \
+                    for _, _, beam_sentence, _ in batch_beam_results[batch_id]
+            ], dim=0
+        ).unsqueeze(0) \
+            for batch_id in range(batch)
+    ], dim=0)  # batch x beam x max_length
+
+    output_scores = torch.cat([
+        torch.cat(
+            [
+                beam_log_prob.unsqueeze(0) \
+                    for _, _, _, beam_log_prob in batch_beam_results[batch_id]
+            ], dim=0
+        ).unsqueeze(0) \
+            for batch_id in range(batch)
+    ], dim=0).squeeze(-1)  # batch x beam x 1
+
+    return OrderedDict({
+        'output_ids':
+        torch.gather(
+            output_ids.long(), 1,
+            output_scores.argmax(-1, keepdim=True).unsqueeze(1).repeat(
+                1, 1, max_length)).squeeze(1),
+        'output_scores':
+        output_scores,
+        'beam_output_ids':
+        output_ids.long()
+    })
+
+
+def generation(transformer: Callable, **kwargs):
+
+    # parse keyword arguments, and assign default values
+    kwargs['max_length'] = kwargs.get('max_length', 32)
+    kwargs['early_stopping'] = kwargs.get('early_stopping', True)
+    kwargs['num_beams'] = kwargs.get('num_beams', None)
+    kwargs['eos_token_id'] = kwargs.get('eos_token_id', -1)
+    kwargs['restore_prefix'] = kwargs.get('restore_prefix', False)
+
+    input_ids = kwargs.get('input_ids', None)
+    inputs_embeds = kwargs.get('inputs_embeds', None)
+    embedding_layer = transformer.get_input_embeddings()
+
+    if inputs_embeds is not None:
+        assert input_ids is None, (
+            'inputs_embeds takes precedence over input_ids; pass only one!')
+    elif input_ids is not None:
+        kwargs['inputs_embeds'] = embedding_layer(input_ids)
+    else:
+        raise NotImplementedError
+
+    if kwargs['num_beams'] is None:
+        # batch x max_length
+        outputs = greedy_decode(transformer, **kwargs)
+    else:
+        outputs = beam_search_decode(transformer, **kwargs)
+
+    ## post-processing, adding prefix if necessary
+    if kwargs['restore_prefix'] is True:
+        assert input_ids is not None, (
+            'the prefix can only be restored when input_ids is provided!')
+        outputs['output_ids'] = torch.cat([input_ids, outputs['output_ids']],
+                                          dim=-1)
+
+    return outputs
diff --git a/models/LL3DA/models/ll3da/position_embedding.py b/models/LL3DA/models/ll3da/position_embedding.py
new file mode 100644
index 0000000..bf8c163
--- /dev/null
+++ b/models/LL3DA/models/ll3da/position_embedding.py
@@ -0,0 +1,139 @@
+"""Various positional encodings for the transformer."""
+import math
+
+import numpy as np
+import torch
+from torch import nn
+from utils.pc_util import shift_scale_points
+
+
+class PositionEmbeddingCoordsSine(nn.Module):
+
+    def __init__(
+        self,
+        temperature=10000,
+        normalize=False,
+        scale=None,
+        pos_type='fourier',
+        d_pos=None,
+        d_in=3,
+        gauss_scale=1.0,
+    ):
+        super().__init__()
+        self.temperature = temperature
+        self.normalize = normalize
+        if scale is not None and normalize is False:
+            raise ValueError('normalize should be True if scale is passed')
+        if scale is None:
+            scale = 2 * math.pi
+        assert pos_type in ['sine', 'fourier']
+        self.pos_type = pos_type
+        self.scale = scale
+        if pos_type == 'fourier':
+            assert d_pos is not None
+            assert d_pos % 2 == 0
+            # define a gaussian matrix input_ch -> output_ch
+            B = torch.empty((d_in, d_pos // 2)).normal_()
+            B *= gauss_scale
+            self.register_buffer('gauss_B', B)
+            self.d_pos = d_pos
+
+    def get_sine_embeddings(self, xyz, num_channels, input_range):
+        # clone coords so that shift/scale operations do not affect original tensor
+        orig_xyz = xyz
+        xyz = orig_xyz.clone()
+
+        ncoords = xyz.shape[1]
+        if self.normalize:
+            xyz = shift_scale_points(xyz, src_range=input_range)
+
+        ndim = num_channels // xyz.shape[2]
+        if ndim % 2 != 0:
+            ndim -= 1
+        # automatically handle remainder by assigning it to the first dim
+        rems = num_channels - (ndim * xyz.shape[2])
+
+        assert (
+            ndim % 2 == 0
+        ), f'Cannot handle odd sized ndim={ndim} where num_channels={num_channels} and xyz={xyz.shape}'
+
+        final_embeds = []
+        prev_dim = 0
+
+        for d in range(xyz.shape[2]):
+            cdim = ndim
+            if rems > 0:
+                # add remainder in increments of two to maintain even size
+                cdim += 2
+                rems -= 2
+
+            if cdim != prev_dim:
+                dim_t = torch.arange(cdim,
+                                     dtype=torch.float32,
+                                     device=xyz.device)
+                dim_t = self.temperature**(2 * (dim_t // 2) / cdim)
+
+            # create batch x cdim x ncoords embedding
+            raw_pos = xyz[:, :, d]
+            if self.scale:
+                raw_pos *= self.scale
+            pos = raw_pos[:, :, None] / dim_t
+            pos = torch.stack((pos[:, :, 0::2].sin(), pos[:, :, 1::2].cos()),
+                              dim=3).flatten(2)
+            final_embeds.append(pos)
+            prev_dim = cdim
+
+        final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)
+        return final_embeds
+
+    def get_fourier_embeddings(self, xyz, num_channels=None, input_range=None):
+        # Follows - https://people.eecs.berkeley.edu/~bmild/fourfeat/index.html
+
+        if num_channels is None:
+            num_channels = self.gauss_B.shape[1] * 2
+
+        bsize, npoints = xyz.shape[0], xyz.shape[1]
+        assert num_channels > 0 and num_channels % 2 == 0
+        d_in, max_d_out = self.gauss_B.shape[0], self.gauss_B.shape[1]
+        d_out = num_channels // 2
+        assert d_out <= max_d_out
+        assert d_in == xyz.shape[-1]
+
+        # clone coords so that shift/scale operations do not affect original tensor
+        orig_xyz = xyz
+        xyz = orig_xyz.clone()
+
+        ncoords = xyz.shape[1]
+        if self.normalize:
+            xyz = shift_scale_points(xyz, src_range=input_range)
+
+        xyz *= 2 * np.pi
+        xyz_proj = torch.mm(xyz.view(-1, d_in), self.gauss_B[:, :d_out]).view(
+            bsize, npoints, d_out)
+        final_embeds = [xyz_proj.sin(), xyz_proj.cos()]
+
+        # return batch x d_pos x npoints embedding
+        final_embeds = torch.cat(final_embeds, dim=2).permute(0, 2, 1)
+        return final_embeds
+
+    def forward(self, xyz, num_channels=None, input_range=None):
+        assert isinstance(xyz, torch.Tensor)
+        assert xyz.ndim == 3
+        # xyz is batch x npoints x 3
+        if self.pos_type == 'sine':
+            with torch.no_grad():
+                return self.get_sine_embeddings(xyz, num_channels, input_range)
+        elif self.pos_type == 'fourier':
+            with torch.no_grad():
+                return self.get_fourier_embeddings(xyz, num_channels,
+                                                   input_range)
+        else:
+            raise ValueError(f'Unknown {self.pos_type}')
+
+    def extra_repr(self):
+        st = f'type={self.pos_type}, scale={self.scale}, normalize={self.normalize}'
+        if hasattr(self, 'gauss_B'):
+            st += (
+                f', gaussB={self.gauss_B.shape}, gaussBsum={self.gauss_B.sum().item()}'
+            )
+        return st
diff --git a/models/LL3DA/models/model_general.py b/models/LL3DA/models/model_general.py
new file mode 100644
index 0000000..f112865
--- /dev/null
+++ b/models/LL3DA/models/model_general.py
@@ -0,0 +1,89 @@
+import importlib
+
+import torch
+from torch import nn
+
+
+class CaptionNet(nn.Module):
+
+    def train(self, mode: bool = True):
+        super().train(mode)
+        if self.freeze_detector is True:
+            self.detector.eval()
+            for param in self.detector.parameters():
+                param.requires_grad = False
+        return self
+
+    def pretrained_parameters(self):
+        if hasattr(self.captioner, 'pretrained_parameters'):
+            return self.captioner.pretrained_parameters()
+        else:
+            return []
+
+    def __init__(self, args, dataset_config, train_dataset):
+        super(CaptionNet, self).__init__()
+
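+        # CaptionNet composes two sub-modules that are resolved by folder
+        # name at runtime: `models.<detector>.detector` and
+        # `models.<captioner>.captioner`, so either part can be swapped
+        # via command-line flags.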
+        self.freeze_detector = args.freeze_detector
+        self.detector = None
+        self.captioner = None
+
+        if args.detector is not None:
+            detector_module = importlib.import_module(
+                f'models.{args.detector}.detector')
+            self.detector = detector_module.detector(args, dataset_config)
+
+        if args.captioner is not None:
+            captioner_module = importlib.import_module(
+                f'models.{args.captioner}.captioner')
+            self.captioner = captioner_module.captioner(args, train_dataset)
+
+        self.train()
+
+    def forward(self,
+                batch_data_label: dict,
+                is_eval: bool = False,
+                task_name: str = None) -> dict:
+
+        outputs = {'loss': torch.zeros(1)[0].cuda()}
+
+        # In the LL3DA paper, the detector is always frozen.
+        if self.detector is not None:
+            if self.freeze_detector is True:
+                outputs = self.detector(batch_data_label, is_eval=True)
+            else:
+                outputs = self.detector(batch_data_label, is_eval=is_eval)
+
+        # the detector is frozen, so there is no need to back-propagate its loss
+        if self.freeze_detector is True:
+            outputs['loss'] = torch.zeros(1)[0].cuda()
+
+        # this is the output of the detector
+        # box_predictions['outputs'].update({
+        #     'prop_features': box_features.permute(0, 2, 1, 3),  # nlayers x batch x nqueries x channel  # the feature of each proposal (box)
+        #     'enc_features': enc_features.permute(1, 0, 2),  # batch x npoints x channel  # the feature of the whole scene
+        #     'enc_xyz': enc_xyz,  # batch x npoints x 3
+        #     'query_xyz': query_xyz,  # batch x nqueries x 3
+        # })
+        # "sem_cls_logits": cls_logits[l],
+        # "center_normalized": center_normalized.contiguous(),
+        # "center_unnormalized": center_unnormalized,
+        # "size_normalized": size_normalized[l],
+        # "size_unnormalized": size_unnormalized,
+        # "angle_logits": angle_logits[l],
+        # "angle_residual": angle_residual[l],
+        # "angle_residual_normalized": angle_residual_normalized[l],
+        # "angle_continuous": angle_continuous,
+        # "objectness_prob": objectness_prob,
+        # "sem_cls_prob": semcls_prob,
+        # "box_corners": box_corners,
+
+        if self.captioner is not None:
+            outputs = self.captioner(outputs,
+                                     batch_data_label,
+                                     is_eval=is_eval,
+                                     task_name=task_name)
+        else:
+            batch, nproposals, _, _ = outputs['box_corners'].shape
+            outputs['lang_cap'] = [['this is a valid match!'] * nproposals
+                                   ] * batch
+        return outputs
diff --git a/models/LL3DA/scripts/opt-1.3b/eval.generalist.sh b/models/LL3DA/scripts/opt-1.3b/eval.generalist.sh
new file mode 100644
index 0000000..38bcd0e
--- /dev/null
+++ b/models/LL3DA/scripts/opt-1.3b/eval.generalist.sh
@@ -0,0 +1,41 @@
+export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning'
+export MKL_NUM_THREADS=1
+export NUMEXPR_NUM_THREADS=1
+export OMP_NUM_THREADS=1
+# unified_embodied_scan_caption
+#/mnt/hwfile/OpenRobotLab/yangshuai1/ll3da/ckpts/fine_tune_full/checkpoint_140k.pth
+python main.py \
+    --use_color --use_normal \
+    --detector detector_Vote2Cap_DETR \
+    --captioner ll3da \
+    --test_ckpt /mnt/petrelfs/linjingli/mmscan_modelzoo-main/llmzoo/LL3DA/ckpts/opt-1.3b/train_qa_7_31/checkpoint_100k.pth \
+    --dataset unified_embodied_scan_qa \
+    --vocab facebook/opt-1.3b \
+    --qformer_vocab bert-base-embedding \
+    --checkpoint_dir ckpts/opt-1.3b/test_7_31 \
+    --dist_url tcp://localhost:12345 \
+    --criterion 'CiDEr' \
+    --freeze_detector --freeze_llm \
+    --batchsize_per_gpu 12 --ngpus 4 \
+    --max_des_len 512 \
+    --max_prompt 1 \
+    --use_beam_search \
+    --test_only
+
+# python main.py \
+#     --use_color --use_normal \
+#     --detector detector_Vote2Cap_DETR \
+#     --captioner ll3da \
+#     --checkpoint_dir ./ckpts/opt-1.3b/ll3da-generalist \
+#     --test_ckpt
./ckpts/opt-1.3b/ll3da-generalist/checkpoint.pth \ +# --dataset unified_3dllm_scene_description,unified_3dllm_embodied_dialogue,unified_3dllm_embodied_planning,unified_scanqa,unified_densecap_nr3d,unified_densecap_scanrefer \ +# --vocab facebook/opt-1.3b \ +# --qformer_vocab bert-base-embedding \ +# --dist_url tcp://localhost:12345 \ +# --criterion 'CiDEr' \ +# --freeze_detector --freeze_llm \ +# --batchsize_per_gpu 4 --ngpus 8 \ +# --max_des_len 512 \ +# --max_prompt 1 \ +# --use_beam_search \ +# --test_only diff --git a/models/LL3DA/scripts/opt-1.3b/eval.mmscanqa.sh b/models/LL3DA/scripts/opt-1.3b/eval.mmscanqa.sh new file mode 100644 index 0000000..c069b5b --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/eval.mmscanqa.sh @@ -0,0 +1,24 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +# change the test_ckpt and checkpoint_dir + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --checkpoint_dir path/to/record \ + --test_ckpt path/to/ckpt \ + --dataset unified_embodied_scan_qa \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --dist_url tcp://localhost:1233 \ + --criterion 'refined_EM' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 8 --ngpus 4 \ + --max_des_len 224 \ + --max_prompt 1 \ + --use_beam_search \ + --test_only diff --git a/models/LL3DA/scripts/opt-1.3b/eval.nr3d.sh b/models/LL3DA/scripts/opt-1.3b/eval.nr3d.sh new file mode 100644 index 0000000..58748d6 --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/eval.nr3d.sh @@ -0,0 +1,23 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export CUDA_VISIBLE_DEVICES=4,5 +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --checkpoint_dir ./ckpts/opt-1.3b/ll3da-generalist \ + --test_ckpt ./ckpts/opt-1.3b/ll3da-generalist/checkpoint.pth \ + --dataset unified_densecap_nr3d \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --dist_url tcp://localhost:111 \ + --criterion 'CiDEr@0.5' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 8 --ngpus 2 \ + --max_des_len 256 \ + --max_prompt 1 \ + --use_beam_search \ + --test_only diff --git a/models/LL3DA/scripts/opt-1.3b/eval.scanqa.sh b/models/LL3DA/scripts/opt-1.3b/eval.scanqa.sh new file mode 100644 index 0000000..9204c0b --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/eval.scanqa.sh @@ -0,0 +1,23 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export CUDA_VISIBLE_DEVICES=0,1 +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --checkpoint_dir ./ckpts/opt-1.3b/ll3da-generalist \ + --test_ckpt ./ckpts/opt-1.3b/ll3da-generalist/checkpoint.pth \ + --dataset unified_scanqa \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --dist_url tcp://localhost:333 \ + --criterion 'CiDEr' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 8 --ngpus 2 \ + --max_des_len 256 \ + --max_prompt 1 \ + --use_beam_search \ + --test_only diff --git a/models/LL3DA/scripts/opt-1.3b/eval.scanrefer.sh b/models/LL3DA/scripts/opt-1.3b/eval.scanrefer.sh new file mode 100644 index 0000000..d2df440 --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/eval.scanrefer.sh @@ 
-0,0 +1,23 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export CUDA_VISIBLE_DEVICES=6,7 +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --checkpoint_dir ./ckpts/opt-1.3b/ll3da-generalist \ + --test_ckpt ./ckpts/opt-1.3b/ll3da-generalist/checkpoint.pth \ + --dataset unified_densecap_scanrefer \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --dist_url tcp://localhost:222 \ + --criterion 'CiDEr@0.5' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 8 --ngpus 2 \ + --max_des_len 256 \ + --max_prompt 1 \ + --use_beam_search \ + --test_only diff --git a/models/LL3DA/scripts/opt-1.3b/train.generalist.sh b/models/LL3DA/scripts/opt-1.3b/train.generalist.sh new file mode 100644 index 0000000..7890dde --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/train.generalist.sh @@ -0,0 +1,25 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 +#unified_embodied_scan_caption, +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --pretrained_weights ./pretrained/vote2cap-detr/scannet_vote2cap_detr_XYZ_COLOR_NORMAL.pth \ + --warm_lr_epochs 1 \ + --dataset unified_embodied_scan_qa \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --checkpoint_dir ./ckpts/opt-1.3b/train_qa_8_2 \ + --max_epoch 32 \ + --dist_url tcp://localhost:12345 \ + --eval_every_iteration 300000 \ + --start_eval_after 19999 \ + --save_every 10000 \ + --criterion 'CiDEr' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 4 --ngpus 1 --base_lr 1e-4 --final_lr 1e-6 \ + --max_des_len 512 \ + --max_prompt 1 --use_beam_search diff --git a/models/LL3DA/scripts/opt-1.3b/tuning.mmscanqa.sh b/models/LL3DA/scripts/opt-1.3b/tuning.mmscanqa.sh new file mode 100644 index 0000000..48b4015 --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/tuning.mmscanqa.sh @@ -0,0 +1,27 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +# change the pretrained_weights and checkpoint_dir + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --pretrained_weights path/to/pretrained-weights \ + --warm_lr_epochs 0 \ + --dataset unified_embodied_scan_qa \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --checkpoint_dir path/to/record \ + --max_epoch 4 \ + --dist_url tcp://localhost:12345 \ + --eval_every_iteration 3000000 \ + --start_eval_after -1 \ + --save_every 10000 \ + --criterion 'refined_EM' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 8 --ngpus 4 --base_lr 1e-6 --final_lr 1e-6 \ + --max_des_len 224 \ + --max_prompt 1 --use_beam_search diff --git a/models/LL3DA/scripts/opt-1.3b/tuning.nr3d.sh b/models/LL3DA/scripts/opt-1.3b/tuning.nr3d.sh new file mode 100644 index 0000000..f4b1416 --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/tuning.nr3d.sh @@ -0,0 +1,26 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export CUDA_VISIBLE_DEVICES=4,5 +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --pretrained_weights 
./ckpts/opt-1.3b/ll3da-generalist/checkpoint.pth \ + --warm_lr_epochs 0 \ + --dataset unified_densecap_nr3d \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --checkpoint_dir ./ckpts/opt-1.3b/ll3da-nr3d-tuned \ + --max_epoch 32 \ + --dist_url tcp://localhost:111 \ + --eval_every_iteration 4000 \ + --start_eval_after -1 \ + --save_every 10000 \ + --criterion 'CiDEr@0.5' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 8 --ngpus 2 --base_lr 1e-6 --final_lr 1e-6 \ + --max_des_len 256 \ + --max_prompt 1 --use_beam_search diff --git a/models/LL3DA/scripts/opt-1.3b/tuning.ovdet.sh b/models/LL3DA/scripts/opt-1.3b/tuning.ovdet.sh new file mode 100644 index 0000000..40a0766 --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/tuning.ovdet.sh @@ -0,0 +1,26 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export CUDA_VISIBLE_DEVICES=0,1,2,3,4,5,6,7 +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --pretrained_weights ./ckpts/opt-1.3b/ll3da-generalist/checkpoint.pth \ + --warm_lr_epochs 1 \ + --dataset unified_ovdet_nr3d,unified_ovdet_scanrefer \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --checkpoint_dir ./ckpts/opt-1.3b/ll3da-ovdet \ + --max_epoch 32 \ + --dist_url tcp://localhost:123 \ + --eval_every_iteration 4000 \ + --start_eval_after -1 \ + --save_every 10000 \ + --criterion 'CiDEr@0.5' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 4 --ngpus 8 --base_lr 1e-4 --final_lr 1e-6 \ + --max_des_len 256 \ + --max_prompt 1 diff --git a/models/LL3DA/scripts/opt-1.3b/tuning.scanqa.sh b/models/LL3DA/scripts/opt-1.3b/tuning.scanqa.sh new file mode 100644 index 0000000..507d1b7 --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/tuning.scanqa.sh @@ -0,0 +1,26 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export CUDA_VISIBLE_DEVICES=0,1 +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --pretrained_weights /mnt/petrelfs/linjingli/mmscan_modelzoo-main/llmzoo/LL3DA/pretrained/ll-gernalist/ll3da-opt-1.3b.pth \ + --warm_lr_epochs 0 \ + --dataset unified_scanqa \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + --checkpoint_dir ./ckpts/opt-1.3b/ll3da-scanqa-tuned \ + --max_epoch 24 \ + --dist_url tcp://localhost:333 \ + --eval_every_iteration 4000 \ + --start_eval_after -1 \ + --save_every 10000 \ + --criterion 'CiDEr' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 8 --ngpus 2 --base_lr 1e-6 --final_lr 1e-6 \ + --max_des_len 224 \ + --max_prompt 1 --use_beam_search diff --git a/models/LL3DA/scripts/opt-1.3b/tuning.scanrefer.sh b/models/LL3DA/scripts/opt-1.3b/tuning.scanrefer.sh new file mode 100644 index 0000000..6cb6d0a --- /dev/null +++ b/models/LL3DA/scripts/opt-1.3b/tuning.scanrefer.sh @@ -0,0 +1,26 @@ +export PYTHONWARNINGS='ignore:semaphore_tracker:UserWarning' +export CUDA_VISIBLE_DEVICES=6,7 +export MKL_NUM_THREADS=1 +export NUMEXPR_NUM_THREADS=1 +export OMP_NUM_THREADS=1 + +python main.py \ + --use_color --use_normal \ + --detector detector_Vote2Cap_DETR \ + --captioner ll3da \ + --pretrained_weights ./ckpts/opt-1.3b/ll3da-generalist/checkpoint.pth \ + --warm_lr_epochs 0 \ + --dataset unified_densecap_scanrefer \ + --vocab facebook/opt-1.3b \ + --qformer_vocab bert-base-embedding \ + 
--checkpoint_dir ./ckpts/opt-1.3b/ll3da-scanrefer-tuned \ + --max_epoch 16 \ + --dist_url tcp://localhost:222 \ + --eval_every_iteration 4000 \ + --start_eval_after -1 \ + --save_every 10000 \ + --criterion 'CiDEr@0.5' \ + --freeze_detector --freeze_llm \ + --batchsize_per_gpu 8 --ngpus 2 --base_lr 1e-6 --final_lr 1e-6 \ + --max_des_len 256 \ + --max_prompt 1 --use_beam_search diff --git a/models/LL3DA/submit_scanqa.py b/models/LL3DA/submit_scanqa.py new file mode 100644 index 0000000..b6e4c77 --- /dev/null +++ b/models/LL3DA/submit_scanqa.py @@ -0,0 +1,331 @@ +import argparse +import importlib +import json +import os +import time +from collections import OrderedDict + +import numpy as np +import torch +from engine import do_train +from models.model_general import CaptionNet +from torch.multiprocessing import set_start_method +from utils.dist import (all_gather_dict, barrier, get_rank, init_distributed, + is_distributed, is_primary) +from utils.io import resume_if_possible +from utils.misc import SmoothedValue, my_worker_init_fn + + +def make_args_parser(): + parser = argparse.ArgumentParser( + 'End-to-End 3D Dense Captioning with Vote2Cap-DETR', add_help=False) + + ##### Model ##### + # input based parameters + parser.add_argument('--use_color', default=False, action='store_true') + parser.add_argument('--use_normal', default=False, action='store_true') + parser.add_argument('--no_height', default=False, action='store_true') + parser.add_argument('--use_multiview', default=False, action='store_true') + parser.add_argument('--max_prompts', + default=16, + type=int, + help='number of visual interactions') + parser.add_argument('--grid_size_3d', + default=255, + type=int, + help='grid size of 3D environ') + + parser.add_argument('--detector', + default='detector_Vote2Cap_DETR', + choices=['detector_votenet', 'detector_Vote2Cap_DETR'], + help='folder of the detector') + parser.add_argument('--captioner', + default=None, + type=str, + help='folder of the captioner') + parser.add_argument( + '--freeze_detector', + default=False, + action='store_true', + help='freeze all parameters other than the caption head') + parser.add_argument('--freeze_llm', + default=False, + action='store_true', + help='freeze the llm for caption generation') + + # caption related hyper parameters + parser.add_argument( + '--use_beam_search', + default=False, + action='store_true', + help='whether use beam search during caption generation.') + parser.add_argument('--max_des_len', + default=32, + type=int, + help='maximum length of object descriptions.') + + ##### Dataset ##### + parser.add_argument( + '--dataset', + default='scannet', + help='dataset file which stores `dataset` and `dataset_config` class', + ) + parser.add_argument('--vocab', + default='facebook/opt-1.3b', + type=str, + help='should be one of `gpt2` or `scanrefer`') + parser.add_argument('--qformer_vocab', + default='bert-base-embedding', + type=str, + help='should be one of `gpt2` or `scanrefer`') + parser.add_argument('--dataset_num_workers', default=4, type=int) + parser.add_argument('--batchsize_per_gpu', default=8, type=int) + parser.add_argument('--seed', default=0, type=int) + + ##### Testing ##### + parser.add_argument('--test_ckpt', default='', type=str) + + ##### I/O ##### + parser.add_argument('--checkpoint_dir', default=None, type=str) + parser.add_argument('--log_every', default=10, type=int) + + ##### Distributed ##### + parser.add_argument('--ngpus', default=1, type=int, help='number of gpus') + parser.add_argument('--dist_url', + 
                        default='tcp://localhost:12345',
+                        type=str)
+
+    args = parser.parse_args()
+    args.use_height = not args.no_height
+
+    return args
+
+
+@torch.no_grad()
+def evaluate(
+    args,
+    task_name,
+    curr_epoch,
+    model,
+    dataset_config,
+    dataset_loader,
+    logout=print,
+    curr_train_iter=-1,
+):
+
+    # prepare ground truth caption labels
+    print('preparing corpus...')
+
+    annotations = dataset_loader.dataset.annotations
+    candidates = []
+    ### initialize and prepare for evaluation
+    tokenizer = dataset_loader.dataset.tokenizer
+    net_device = next(model.parameters()).device
+    num_batches = len(dataset_loader)
+
+    time_delta = SmoothedValue(window_size=10)
+
+    model.eval()
+    barrier()
+
+    epoch_str = f'[{curr_epoch}/{args.max_epoch}]' if curr_epoch > 0 else ''
+
+    for curr_iter, batch_data_label in enumerate(dataset_loader):
+
+        curr_time = time.time()
+        for key in batch_data_label:
+            batch_data_label[key] = batch_data_label[key].to(net_device)
+
+        model_input = {
+            'point_clouds': batch_data_label['point_clouds'],
+            'point_cloud_dims_min': batch_data_label['point_cloud_dims_min'],
+            'point_cloud_dims_max': batch_data_label['point_cloud_dims_max'],
+            'qformer_input_ids': batch_data_label['qformer_input_ids'],
+            'qformer_attention_mask':
+            batch_data_label['qformer_attention_mask'],
+            'instruction': batch_data_label['instruction'],
+            'instruction_mask': batch_data_label['instruction_mask'],
+        }
+        outputs = model(model_input, is_eval=True, task_name='qa')
+
+        outputs = dict(output_ids=outputs['output_ids'], )
+
+        outputs = all_gather_dict(outputs)
+        batch_data_label = all_gather_dict(batch_data_label)
+
+        output_ids = outputs['output_ids']  # batch x max_length
+        answers = tokenizer.batch_decode(output_ids,
+                                         skip_special_tokens=True,
+                                         clean_up_tokenization_spaces=False)
+
+        question_index = batch_data_label['scan_idx'].reshape(-1)
+        question_index = question_index.cpu().tolist()
+
+        for idx in range(output_ids.shape[0]):
+            anno = annotations[question_index[idx]]
+            key = anno['question_id']
+            answer = answers[idx]
+            answer = ' '.join(filter(lambda w: w, answer.split(' ')))
+            top10_answer = [answer for _ in range(10)]
+
+            candidates.append({
+                'scene_id': anno['scene_id'],
+                'question_id': key,
+                'answer_top10': top10_answer,
+                'bbox': []
+            })
+
+        # Memory intensive as it gathers point cloud GT tensor across all ranks
+        time_delta.update(time.time() - curr_time)
+
+        if is_primary() and curr_iter % args.log_every == 0:
+            mem_mb = torch.cuda.max_memory_allocated() / (1024**2)
+            logout(f'Evaluate {epoch_str}; Batch [{curr_iter}/{num_batches}]; '
+                   f'Evaluating on iter: {curr_train_iter}; '
+                   f'Iter time {time_delta.avg:0.2f}; Mem {mem_mb:0.2f}MB')
+        barrier()
+
+    # end of forward pass traversal
+    if is_primary():
+        with open(os.path.join(args.checkpoint_dir, f'{task_name}.json'),
+                  'w') as f:
+            json.dump(candidates, f, indent=4)
+
+    return None
+
+
+def build_dataset(args):
+    dataset_module = importlib.import_module(f'datasets.{args.dataset}')
+    dataset_config = dataset_module.DatasetConfig()
+
+    datasets = {
+        'train':
+        dataset_module.Dataset(args,
+                               dataset_config,
+                               split_set='train',
+                               use_color=args.use_color,
+                               use_normal=args.use_normal,
+                               use_multiview=args.use_multiview,
+                               use_height=args.use_height,
+                               augment=True),
+        'val':
+        dataset_module.Dataset(args,
+                               dataset_config,
+                               split_set='val',
+                               use_color=args.use_color,
+                               use_normal=args.use_normal,
+                               use_multiview=args.use_multiview,
+                               use_height=args.use_height,
+                               augment=False),
+        'test_w_obj':
+        dataset_module.Dataset(args,
dataset_config, + split_set='test_w_obj', + use_color=args.use_color, + use_normal=args.use_normal, + use_multiview=args.use_multiview, + use_height=args.use_height, + augment=False), + 'test_wo_obj': + dataset_module.Dataset(args, + dataset_config, + split_set='test_wo_obj', + use_color=args.use_color, + use_normal=args.use_normal, + use_multiview=args.use_multiview, + use_height=args.use_height, + augment=False), + } + + dataloaders = {} + for split in datasets.keys(): + if is_distributed(): + sampler = torch.utils.data.DistributedSampler( + datasets[split], shuffle=(split == 'train')) + else: + if split == 'train': + sampler = torch.utils.data.RandomSampler(datasets[split]) + else: + sampler = torch.utils.data.SequentialSampler(datasets[split]) + + dataloaders[split] = torch.utils.data.DataLoader( + datasets[split], + sampler=sampler, + batch_size=args.batchsize_per_gpu, + num_workers=args.dataset_num_workers, + worker_init_fn=my_worker_init_fn, + ) + dataloaders[split + '_sampler'] = sampler + + return dataset_config, datasets, dataloaders + + +def main(local_rank, args): + + if args.ngpus > 1: + init_distributed( + local_rank, + global_rank=local_rank, + world_size=args.ngpus, + dist_url=args.dist_url, + dist_backend='nccl', + ) + + torch.cuda.set_device(local_rank) + np.random.seed(args.seed) + torch.cuda.manual_seed_all(args.seed + get_rank()) + + if args.checkpoint_dir is not None: + pass + elif args.test_ckpt is not None: + args.checkpoint_dir = os.path.dirname(args.test_ckpt) + print(f'testing directory: {args.checkpoint_dir}') + else: + raise AssertionError( + 'Either checkpoint_dir or test_ckpt should be presented!') + os.makedirs(args.checkpoint_dir, exist_ok=True) + + ### build datasets and dataloaders + dataset_config, datasets, dataloaders = build_dataset(args) + model = CaptionNet(args, dataset_config, datasets['train']).cuda() + + model = model.cuda(local_rank) + model_no_ddp = model + + if is_distributed(): + model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model) + model = torch.nn.parallel.DistributedDataParallel( + model, device_ids=[local_rank]) + + # testing phase + checkpoint = torch.load(args.test_ckpt, map_location=torch.device('cpu')) + model_no_ddp.load_state_dict(checkpoint['model'], strict=False) + + evaluate(args, 'val', -1, model, dataset_config, dataloaders['val']) + + evaluate(args, 'test_wo_obj', -1, model, dataset_config, + dataloaders['test_wo_obj']) + + evaluate(args, 'test_w_obj', -1, model, dataset_config, + dataloaders['test_w_obj']) + return + + +def launch_distributed(args): + world_size = args.ngpus + if world_size == 1: + main(local_rank=0, args=args) + else: + torch.multiprocessing.spawn(main, nprocs=world_size, args=(args, )) + + +if __name__ == '__main__': + args = make_args_parser() + + os.environ['PYTHONWARNINGS'] = 'ignore:semaphore_tracker:UserWarning' + + try: + set_start_method('spawn') + except RuntimeError: + pass + launch_distributed(args) diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/include/ball_query.h b/models/LL3DA/third_party/pointnet2/_ext_src/include/ball_query.h new file mode 100644 index 0000000..b4feff8 --- /dev/null +++ b/models/LL3DA/third_party/pointnet2/_ext_src/include/ball_query.h @@ -0,0 +1,7 @@ +// Copyright (c) Facebook, Inc. and its affiliates. 
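+// ball_query: for each of the m query points in new_xyz (b, m, 3), return
+// the indices of up to nsample points from xyz (b, n, 3) lying within
+// `radius`; unfilled slots are padded with the first neighbour found.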
+
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
+                      const int nsample);
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/include/cuda_utils.h b/models/LL3DA/third_party/pointnet2/_ext_src/include/cuda_utils.h
new file mode 100644
index 0000000..f746526
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/include/cuda_utils.h
@@ -0,0 +1,43 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+#ifndef _CUDA_UTILS_H
+#define _CUDA_UTILS_H
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <cmath>
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+#include <vector>
+
+#define TOTAL_THREADS 512
+
+inline int opt_n_threads(int work_size) {
+  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
+
+  return max(min(1 << pow_2, TOTAL_THREADS), 1);
+}
+
+inline dim3 opt_block_config(int x, int y) {
+  const int x_threads = opt_n_threads(x);
+  const int y_threads =
+      max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
+  dim3 block_config(x_threads, y_threads, 1);
+
+  return block_config;
+}
+
+#define CUDA_CHECK_ERRORS()                                           \
+  do {                                                                \
+    cudaError_t err = cudaGetLastError();                             \
+    if (cudaSuccess != err) {                                         \
+      fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n",  \
+              cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
+              __FILE__);                                              \
+      exit(-1);                                                       \
+    }                                                                 \
+  } while (0)
+
+#endif
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/include/group_points.h b/models/LL3DA/third_party/pointnet2/_ext_src/include/group_points.h
new file mode 100644
index 0000000..97be802
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/include/group_points.h
@@ -0,0 +1,8 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor group_points(at::Tensor points, at::Tensor idx);
+at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/include/interpolate.h b/models/LL3DA/third_party/pointnet2/_ext_src/include/interpolate.h
new file mode 100644
index 0000000..e7fb792
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/include/interpolate.h
@@ -0,0 +1,12 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+#pragma once
+
+#include <torch/extension.h>
+#include <vector>
+
+std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
+at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
+                             at::Tensor weight);
+at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
+                                  at::Tensor weight, const int m);
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/include/sampling.h b/models/LL3DA/third_party/pointnet2/_ext_src/include/sampling.h
new file mode 100644
index 0000000..7de473e
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/include/sampling.h
@@ -0,0 +1,9 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor gather_points(at::Tensor points, at::Tensor idx);
+at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
+at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/include/utils.h b/models/LL3DA/third_party/pointnet2/_ext_src/include/utils.h
new file mode 100644
index 0000000..815dabb
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/include/utils.h
@@ -0,0 +1,28 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
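+// The CHECK_* macros below are guard clauses used by the C++ entry points:
+// they assert device placement, contiguity, and dtype before raw data
+// pointers are handed to the CUDA kernels.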
+
+
+#pragma once
+#include <ATen/cuda/CUDAContext.h>
+#include <torch/extension.h>
+
+#define CHECK_CUDA(x)                                    \
+  do {                                                   \
+    AT_ASSERT(x.is_cuda(), #x " must be a CUDA tensor"); \
+  } while (0)
+
+#define CHECK_CONTIGUOUS(x)                                          \
+  do {                                                               \
+    AT_ASSERT(x.is_contiguous(), #x " must be a contiguous tensor"); \
+  } while (0)
+
+#define CHECK_IS_INT(x)                                \
+  do {                                                 \
+    AT_ASSERT(x.scalar_type() == at::ScalarType::Int,  \
+              #x " must be an int tensor");            \
+  } while (0)
+
+#define CHECK_IS_FLOAT(x)                               \
+  do {                                                  \
+    AT_ASSERT(x.scalar_type() == at::ScalarType::Float, \
+              #x " must be a float tensor");            \
+  } while (0)
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/ball_query.cpp b/models/LL3DA/third_party/pointnet2/_ext_src/src/ball_query.cpp
new file mode 100644
index 0000000..7dd77d5
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/ball_query.cpp
@@ -0,0 +1,35 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+
+#include "ball_query.h"
+#include "utils.h"
+
+void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
+                                     int nsample, const float *new_xyz,
+                                     const float *xyz, int *idx);
+
+at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
+                      const int nsample) {
+  CHECK_CONTIGUOUS(new_xyz);
+  CHECK_CONTIGUOUS(xyz);
+  CHECK_IS_FLOAT(new_xyz);
+  CHECK_IS_FLOAT(xyz);
+
+  if (new_xyz.is_cuda()) {
+    CHECK_CUDA(xyz);
+  }
+
+  at::Tensor idx =
+      torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
+                   at::device(new_xyz.device()).dtype(at::ScalarType::Int));
+
+  if (new_xyz.is_cuda()) {
+    query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
+                                    radius, nsample, new_xyz.data<float>(),
+                                    xyz.data<float>(), idx.data<int>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return idx;
+}
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/ball_query_gpu.cu b/models/LL3DA/third_party/pointnet2/_ext_src/src/ball_query_gpu.cu
new file mode 100644
index 0000000..cee88cb
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/ball_query_gpu.cu
@@ -0,0 +1,57 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
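+// Parallelization scheme: one thread block per batch element; threads
+// stride over the m query points, and each thread scans the n source points
+// until nsample neighbours within the radius are found.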
+
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: new_xyz(b, m, 3) xyz(b, n, 3)
+// output: idx(b, m, nsample)
+__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
+                                        int nsample,
+                                        const float *__restrict__ new_xyz,
+                                        const float *__restrict__ xyz,
+                                        int *__restrict__ idx) {
+  int batch_index = blockIdx.x;
+  xyz += batch_index * n * 3;
+  new_xyz += batch_index * m * 3;
+  idx += m * nsample * batch_index;
+
+  int index = threadIdx.x;
+  int stride = blockDim.x;
+
+  float radius2 = radius * radius;
+  for (int j = index; j < m; j += stride) {
+    float new_x = new_xyz[j * 3 + 0];
+    float new_y = new_xyz[j * 3 + 1];
+    float new_z = new_xyz[j * 3 + 2];
+    for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
+      float x = xyz[k * 3 + 0];
+      float y = xyz[k * 3 + 1];
+      float z = xyz[k * 3 + 2];
+      float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
+                 (new_z - z) * (new_z - z);
+      if (d2 < radius2) {
+        if (cnt == 0) {
+          for (int l = 0; l < nsample; ++l) {
+            idx[j * nsample + l] = k;
+          }
+        }
+        idx[j * nsample + cnt] = k;
+        ++cnt;
+      }
+    }
+  }
+}
+
+void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
+                                     int nsample, const float *new_xyz,
+                                     const float *xyz, int *idx) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
+      b, n, m, radius, nsample, new_xyz, xyz, idx);
+
+  CUDA_CHECK_ERRORS();
+}
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/bindings.cpp b/models/LL3DA/third_party/pointnet2/_ext_src/src/bindings.cpp
new file mode 100644
index 0000000..58d6c2d
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/bindings.cpp
@@ -0,0 +1,22 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+
+#include "ball_query.h"
+#include "group_points.h"
+#include "interpolate.h"
+#include "sampling.h"
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("gather_points", &gather_points);
+  m.def("gather_points_grad", &gather_points_grad);
+  m.def("furthest_point_sampling", &furthest_point_sampling);
+
+  m.def("three_nn", &three_nn);
+  m.def("three_interpolate", &three_interpolate);
+  m.def("three_interpolate_grad", &three_interpolate_grad);
+
+  m.def("ball_query", &ball_query);
+
+  m.def("group_points", &group_points);
+  m.def("group_points_grad", &group_points_grad);
+}
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points.cpp b/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points.cpp
new file mode 100644
index 0000000..22998dd
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points.cpp
@@ -0,0 +1,63 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
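+// group_points gathers per-neighbourhood features: points (b, c, n) indexed
+// by idx (b, npoints, nsample) produce out (b, c, npoints, nsample);
+// group_points_grad scatters gradients back onto the source points.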
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points.cpp b/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points.cpp
new file mode 100644
index 0000000..22998dd
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points.cpp
@@ -0,0 +1,63 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+
+#include "group_points.h"
+#include "utils.h"
+
+void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
+                                 const float *points, const int *idx,
+                                 float *out);
+
+void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
+                                      int nsample, const float *grad_out,
+                                      const int *idx, float *grad_points);
+
+at::Tensor group_points(at::Tensor points, at::Tensor idx) {
+  CHECK_CONTIGUOUS(points);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_IS_FLOAT(points);
+  CHECK_IS_INT(idx);
+
+  if (points.is_cuda()) {
+    CHECK_CUDA(idx);
+  }
+
+  at::Tensor output =
+      torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)},
+                   at::device(points.device()).dtype(at::ScalarType::Float));
+
+  if (points.is_cuda()) {
+    group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
+                                idx.size(1), idx.size(2), points.data<float>(),
+                                idx.data<int>(), output.data<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
+
+at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) {
+  CHECK_CONTIGUOUS(grad_out);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_IS_FLOAT(grad_out);
+  CHECK_IS_INT(idx);
+
+  if (grad_out.is_cuda()) {
+    CHECK_CUDA(idx);
+  }
+
+  at::Tensor output =
+      torch::zeros({grad_out.size(0), grad_out.size(1), n},
+                   at::device(grad_out.device()).dtype(at::ScalarType::Float));
+
+  if (grad_out.is_cuda()) {
+    group_points_grad_kernel_wrapper(
+        grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2),
+        grad_out.data<float>(), idx.data<int>(), output.data<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
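group_points is a pure indexing op: for every (batch, channel, group) slot it gathers `nsample` values along N. A reference PyTorch equivalent, assuming the contiguous float/int32 inputs the checks above enforce:

```python
import torch

def group_points_ref(points, idx):
    """points: (B, C, N) float, idx: (B, npoint, nsample) int -> (B, C, npoint, nsample)."""
    B, C, N = points.shape
    _, npoint, nsample = idx.shape
    flat = idx.reshape(B, 1, npoint * nsample).expand(B, C, -1)
    return points.gather(2, flat.long()).reshape(B, C, npoint, nsample)
```

The backward pass is the transpose of this gather: gradients are scattered back to source points, which is why the CUDA kernel below uses `atomicAdd` (several groups may reference the same point).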
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points_gpu.cu b/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points_gpu.cu
new file mode 100644
index 0000000..e36672e
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/group_points_gpu.cu
@@ -0,0 +1,78 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: points(b, c, n) idx(b, npoints, nsample)
+// output: out(b, c, npoints, nsample)
+__global__ void group_points_kernel(int b, int c, int n, int npoints,
+                                    int nsample,
+                                    const float *__restrict__ points,
+                                    const int *__restrict__ idx,
+                                    float *__restrict__ out) {
+  int batch_index = blockIdx.x;
+  points += batch_index * n * c;
+  idx += batch_index * npoints * nsample;
+  out += batch_index * npoints * nsample * c;
+
+  const int index = threadIdx.y * blockDim.x + threadIdx.x;
+  const int stride = blockDim.y * blockDim.x;
+  for (int i = index; i < c * npoints; i += stride) {
+    const int l = i / npoints;
+    const int j = i % npoints;
+    for (int k = 0; k < nsample; ++k) {
+      int ii = idx[j * nsample + k];
+      out[(l * npoints + j) * nsample + k] = points[l * n + ii];
+    }
+  }
+}
+
+void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
+                                 const float *points, const int *idx,
+                                 float *out) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
+      b, c, n, npoints, nsample, points, idx, out);
+
+  CUDA_CHECK_ERRORS();
+}
+
+// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
+// output: grad_points(b, c, n)
+__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
+                                         int nsample,
+                                         const float *__restrict__ grad_out,
+                                         const int *__restrict__ idx,
+                                         float *__restrict__ grad_points) {
+  int batch_index = blockIdx.x;
+  grad_out += batch_index * npoints * nsample * c;
+  idx += batch_index * npoints * nsample;
+  grad_points += batch_index * n * c;
+
+  const int index = threadIdx.y * blockDim.x + threadIdx.x;
+  const int stride = blockDim.y * blockDim.x;
+  for (int i = index; i < c * npoints; i += stride) {
+    const int l = i / npoints;
+    const int j = i % npoints;
+    for (int k = 0; k < nsample; ++k) {
+      int ii = idx[j * nsample + k];
+      atomicAdd(grad_points + l * n + ii,
+                grad_out[(l * npoints + j) * nsample + k]);
+    }
+  }
+}
+
+void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
+                                      int nsample, const float *grad_out,
+                                      const int *idx, float *grad_points) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
+      b, c, n, npoints, nsample, grad_out, idx, grad_points);
+
+  CUDA_CHECK_ERRORS();
+}
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/interpolate.cpp b/models/LL3DA/third_party/pointnet2/_ext_src/src/interpolate.cpp
new file mode 100644
index 0000000..4b680c5
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/interpolate.cpp
@@ -0,0 +1,101 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+#include "interpolate.h"
+#include "utils.h"
+
+void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
+                             const float *known, float *dist2, int *idx);
+void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
+                                      const float *points, const int *idx,
+                                      const float *weight, float *out);
+void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
+                                           const float *grad_out,
+                                           const int *idx, const float *weight,
+                                           float *grad_points);
+
+std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
+  CHECK_CONTIGUOUS(unknowns);
+  CHECK_CONTIGUOUS(knows);
+  CHECK_IS_FLOAT(unknowns);
+  CHECK_IS_FLOAT(knows);
+
+  if (unknowns.is_cuda()) {
+    CHECK_CUDA(knows);
+  }
+
+  at::Tensor idx =
+      torch::zeros({unknowns.size(0), unknowns.size(1), 3},
+                   at::device(unknowns.device()).dtype(at::ScalarType::Int));
+  at::Tensor dist2 =
+      torch::zeros({unknowns.size(0), unknowns.size(1), 3},
+                   at::device(unknowns.device()).dtype(at::ScalarType::Float));
+
+  if (unknowns.is_cuda()) {
+    three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1),
+                            unknowns.data<float>(), knows.data<float>(),
+                            dist2.data<float>(), idx.data<int>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return {dist2, idx};
+}
+
+at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
+                             at::Tensor weight) {
+  CHECK_CONTIGUOUS(points);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_CONTIGUOUS(weight);
+  CHECK_IS_FLOAT(points);
+  CHECK_IS_INT(idx);
+  CHECK_IS_FLOAT(weight);
+
+  if (points.is_cuda()) {
+    CHECK_CUDA(idx);
+    CHECK_CUDA(weight);
+  }
+
+  at::Tensor output =
+      torch::zeros({points.size(0), points.size(1), idx.size(1)},
+                   at::device(points.device()).dtype(at::ScalarType::Float));
+
+  if (points.is_cuda()) {
+    three_interpolate_kernel_wrapper(
+        points.size(0), points.size(1), points.size(2), idx.size(1),
+        points.data<float>(), idx.data<int>(), weight.data<float>(),
+        output.data<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
+at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
+                                  at::Tensor weight, const int m) {
+  CHECK_CONTIGUOUS(grad_out);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_CONTIGUOUS(weight);
+  CHECK_IS_FLOAT(grad_out);
+  CHECK_IS_INT(idx);
+  CHECK_IS_FLOAT(weight);
+
+  if (grad_out.is_cuda()) {
+    CHECK_CUDA(idx);
+    CHECK_CUDA(weight);
+  }
+
+  at::Tensor output =
+      torch::zeros({grad_out.size(0), grad_out.size(1), m},
+                   at::device(grad_out.device()).dtype(at::ScalarType::Float));
+
+  if (grad_out.is_cuda()) {
+    three_interpolate_grad_kernel_wrapper(
+        grad_out.size(0), grad_out.size(1), grad_out.size(2), m,
+        grad_out.data<float>(), idx.data<int>(), weight.data<float>(),
+        output.data<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
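For intuition, both interpolation ops can be sketched with stock PyTorch calls. `torch.cdist`/`topk` stand in for the custom kernels; note the sketch returns plain L2 distances, matching the Python-level `three_nn` wrapper (which takes the square root of the squared distances computed here):

```python
import torch

def three_nn_ref(unknown, known):
    """unknown: (B, n, 3), known: (B, m, 3) -> dist (B, n, 3), idx (B, n, 3)."""
    d = torch.cdist(unknown, known)              # (B, n, m) pairwise L2
    dist, idx = d.topk(3, dim=2, largest=False)  # three nearest neighbors
    return dist, idx.int()

def three_interpolate_ref(feats, idx, weight):
    """feats: (B, c, m), idx/weight: (B, n, 3) -> (B, c, n) weighted sum."""
    B, c, m = feats.shape
    n = idx.shape[1]
    gathered = feats.gather(
        2, idx.long().reshape(B, 1, n * 3).expand(B, c, -1))
    return (gathered.reshape(B, c, n, 3) * weight.unsqueeze(1)).sum(-1)
```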
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/interpolate_gpu.cu b/models/LL3DA/third_party/pointnet2/_ext_src/src/interpolate_gpu.cu
new file mode 100644
index 0000000..b4c5644
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/interpolate_gpu.cu
@@ -0,0 +1,157 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: unknown(b, n, 3) known(b, m, 3)
+// output: dist2(b, n, 3), idx(b, n, 3)
+__global__ void three_nn_kernel(int b, int n, int m,
+                                const float *__restrict__ unknown,
+                                const float *__restrict__ known,
+                                float *__restrict__ dist2,
+                                int *__restrict__ idx) {
+  int batch_index = blockIdx.x;
+  unknown += batch_index * n * 3;
+  known += batch_index * m * 3;
+  dist2 += batch_index * n * 3;
+  idx += batch_index * n * 3;
+
+  int index = threadIdx.x;
+  int stride = blockDim.x;
+  for (int j = index; j < n; j += stride) {
+    float ux = unknown[j * 3 + 0];
+    float uy = unknown[j * 3 + 1];
+    float uz = unknown[j * 3 + 2];
+
+    double best1 = 1e40, best2 = 1e40, best3 = 1e40;
+    int besti1 = 0, besti2 = 0, besti3 = 0;
+    for (int k = 0; k < m; ++k) {
+      float x = known[k * 3 + 0];
+      float y = known[k * 3 + 1];
+      float z = known[k * 3 + 2];
+      float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
+      if (d < best1) {
+        best3 = best2;
+        besti3 = besti2;
+        best2 = best1;
+        besti2 = besti1;
+        best1 = d;
+        besti1 = k;
+      } else if (d < best2) {
+        best3 = best2;
+        besti3 = besti2;
+        best2 = d;
+        besti2 = k;
+      } else if (d < best3) {
+        best3 = d;
+        besti3 = k;
+      }
+    }
+    dist2[j * 3 + 0] = best1;
+    dist2[j * 3 + 1] = best2;
+    dist2[j * 3 + 2] = best3;
+
+    idx[j * 3 + 0] = besti1;
+    idx[j * 3 + 1] = besti2;
+    idx[j * 3 + 2] = besti3;
+  }
+}
+
+void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
+                             const float *known, float *dist2, int *idx) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known,
+                                                      dist2, idx);
+
+  CUDA_CHECK_ERRORS();
+}
+
+// input: points(b, c, m), idx(b, n, 3), weight(b, n, 3)
+// output: out(b, c, n)
+__global__ void three_interpolate_kernel(int b, int c, int m, int n,
+                                         const float *__restrict__ points,
+                                         const int *__restrict__ idx,
+                                         const float *__restrict__ weight,
+                                         float *__restrict__ out) {
+  int batch_index = blockIdx.x;
+  points += batch_index * m * c;
+
+  idx += batch_index * n * 3;
+  weight += batch_index * n * 3;
+
+  out += batch_index * n * c;
+
+  const int index = threadIdx.y * blockDim.x + threadIdx.x;
+  const int stride = blockDim.y * blockDim.x;
+  for (int i = index; i < c * n; i += stride) {
+    const int l = i / n;
+    const int j = i % n;
+    float w1 = weight[j * 3 + 0];
+    float w2 = weight[j * 3 + 1];
+    float w3 = weight[j * 3 + 2];
+
+    int i1 = idx[j * 3 + 0];
+    int i2 = idx[j * 3 + 1];
+    int i3 = idx[j * 3 + 2];
+
+    out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 +
+             points[l * m + i3] * w3;
+  }
+}
+
+void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
+                                      const float *points, const int *idx,
+                                      const float *weight, float *out) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
+      b, c, m, n, points, idx, weight, out);
+
+  CUDA_CHECK_ERRORS();
+}
+
+// input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3)
+// output: grad_points(b, c, m)
+
+__global__ void three_interpolate_grad_kernel(
+    int b, int c, int n, int m, const float *__restrict__ grad_out,
+    const int *__restrict__ idx, const float *__restrict__ weight,
+    float *__restrict__ grad_points) {
+  int batch_index = blockIdx.x;
+  grad_out += batch_index * n * c;
+  idx += batch_index * n * 3;
+  weight += batch_index * n * 3;
+  grad_points += batch_index * m * c;
+
+  const int index = threadIdx.y * blockDim.x + threadIdx.x;
+  const int stride = blockDim.y * blockDim.x;
+  for (int i = index; i < c * n; i += stride) {
+    const int l = i / n;
+    const int j = i % n;
+    float w1 = weight[j * 3 + 0];
+    float w2 = weight[j * 3 + 1];
+    float w3 = weight[j * 3 + 2];
+
+    int i1 = idx[j * 3 + 0];
+    int i2 = idx[j * 3 + 1];
+    int i3 = idx[j * 3 + 2];
+
+    atomicAdd(grad_points + l * m + i1, grad_out[i] * w1);
+    atomicAdd(grad_points + l * m + i2, grad_out[i] * w2);
+    atomicAdd(grad_points + l * m + i3, grad_out[i] * w3);
+  }
+}
+
+void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
+                                           const float *grad_out,
+                                           const int *idx, const float *weight,
+                                           float *grad_points) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
+      b, c, n, m, grad_out, idx, weight, grad_points);
+
+  CUDA_CHECK_ERRORS();
+}
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/sampling.cpp b/models/LL3DA/third_party/pointnet2/_ext_src/src/sampling.cpp
new file mode 100644
index 0000000..de55822
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/sampling.cpp
@@ -0,0 +1,88 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+#include "sampling.h"
+#include "utils.h"
+
+void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
+                                  const float *points, const int *idx,
+                                  float *out);
+void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
+                                       const float *grad_out, const int *idx,
+                                       float *grad_points);
+
+void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
+                                            const float *dataset, float *temp,
+                                            int *idxs);
+
+at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
+  CHECK_CONTIGUOUS(points);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_IS_FLOAT(points);
+  CHECK_IS_INT(idx);
+
+  if (points.is_cuda()) {
+    CHECK_CUDA(idx);
+  }
+
+  at::Tensor output =
+      torch::zeros({points.size(0), points.size(1), idx.size(1)},
+                   at::device(points.device()).dtype(at::ScalarType::Float));
+
+  if (points.is_cuda()) {
+    gather_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
+                                 idx.size(1), points.data<float>(),
+                                 idx.data<int>(), output.data<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
+
+at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
+                              const int n) {
+  CHECK_CONTIGUOUS(grad_out);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_IS_FLOAT(grad_out);
+  CHECK_IS_INT(idx);
+
+  if (grad_out.is_cuda()) {
+    CHECK_CUDA(idx);
+  }
+
+  at::Tensor output =
+      torch::zeros({grad_out.size(0), grad_out.size(1), n},
+                   at::device(grad_out.device()).dtype(at::ScalarType::Float));
+
+  if (grad_out.is_cuda()) {
+    gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n,
+                                      idx.size(1), grad_out.data<float>(),
+                                      idx.data<int>(), output.data<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
+at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
+  CHECK_CONTIGUOUS(points);
+  CHECK_IS_FLOAT(points);
+
+  at::Tensor output =
+      torch::zeros({points.size(0), nsamples},
+                   at::device(points.device()).dtype(at::ScalarType::Int));
+
+  at::Tensor tmp =
+      torch::full({points.size(0), points.size(1)}, 1e10,
+                  at::device(points.device()).dtype(at::ScalarType::Float));
+
+  if (points.is_cuda()) {
+    furthest_point_sampling_kernel_wrapper(
+        points.size(0), points.size(1), nsamples, points.data<float>(),
+        tmp.data<float>(), output.data<int>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
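These two ops are typically composed: furthest point sampling picks the subset, gather_points pulls out the matching coordinates and features. Via the raw bindings (in practice pointnet2_utils wraps both with autograd support), the composition looks roughly like this:

```python
import torch
import pointnet2._ext as _ext

xyz = torch.rand(2, 1024, 3).cuda()            # (B, N, 3)
feats = torch.rand(2, 64, 1024).cuda()         # (B, C, N)
inds = _ext.furthest_point_sampling(xyz, 256)  # (B, 256) int32
centers = _ext.gather_points(
    xyz.transpose(1, 2).contiguous(), inds)    # (B, 3, 256)
sub_feats = _ext.gather_points(feats, inds)    # (B, C, 256)
```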
diff --git a/models/LL3DA/third_party/pointnet2/_ext_src/src/sampling_gpu.cu b/models/LL3DA/third_party/pointnet2/_ext_src/src/sampling_gpu.cu
new file mode 100644
index 0000000..d2b3707
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/_ext_src/src/sampling_gpu.cu
@@ -0,0 +1,232 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: points(b, c, n) idx(b, m)
+// output: out(b, c, m)
+__global__ void gather_points_kernel(int b, int c, int n, int m,
+                                     const float *__restrict__ points,
+                                     const int *__restrict__ idx,
+                                     float *__restrict__ out) {
+  for (int i = blockIdx.x; i < b; i += gridDim.x) {
+    for (int l = blockIdx.y; l < c; l += gridDim.y) {
+      for (int j = threadIdx.x; j < m; j += blockDim.x) {
+        int a = idx[i * m + j];
+        out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
+      }
+    }
+  }
+}
+
+void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
+                                  const float *points, const int *idx,
+                                  float *out) {
+  gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
+                         at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
+                                                             points, idx, out);
+
+  CUDA_CHECK_ERRORS();
+}
+
+// input: grad_out(b, c, m) idx(b, m)
+// output: grad_points(b, c, n)
+__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
+                                          const float *__restrict__ grad_out,
+                                          const int *__restrict__ idx,
+                                          float *__restrict__ grad_points) {
+  for (int i = blockIdx.x; i < b; i += gridDim.x) {
+    for (int l = blockIdx.y; l < c; l += gridDim.y) {
+      for (int j = threadIdx.x; j < m; j += blockDim.x) {
+        int a = idx[i * m + j];
+        atomicAdd(grad_points + (i * c + l) * n + a,
+                  grad_out[(i * c + l) * m + j]);
+      }
+    }
+  }
+}
+
+void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
+                                       const float *grad_out, const int *idx,
+                                       float *grad_points) {
+  gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
+                              at::cuda::getCurrentCUDAStream()>>>(
+      b, c, n, npoints, grad_out, idx, grad_points);
+
+  CUDA_CHECK_ERRORS();
+}
+
+__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
+                         int idx1, int idx2) {
+  const float v1 = dists[idx1], v2 = dists[idx2];
+  const int i1 = dists_i[idx1], i2 = dists_i[idx2];
+  dists[idx1] = max(v1, v2);
+  dists_i[idx1] = v2 > v1 ? i2 : i1;
+}
+
+// Input dataset: (b, n, 3), tmp: (b, n)
+// Output idxs (b, m)
+template <unsigned int block_size>
+__global__ void furthest_point_sampling_kernel(
+    int b, int n, int m, const float *__restrict__ dataset,
+    float *__restrict__ temp, int *__restrict__ idxs) {
+  if (m <= 0) return;
+  __shared__ float dists[block_size];
+  __shared__ int dists_i[block_size];
+
+  int batch_index = blockIdx.x;
+  dataset += batch_index * n * 3;
+  temp += batch_index * n;
+  idxs += batch_index * m;
+
+  int tid = threadIdx.x;
+  const int stride = block_size;
+
+  int old = 0;
+  if (threadIdx.x == 0) idxs[0] = old;
+
+  __syncthreads();
+  for (int j = 1; j < m; j++) {
+    int besti = 0;
+    float best = -1;
+    float x1 = dataset[old * 3 + 0];
+    float y1 = dataset[old * 3 + 1];
+    float z1 = dataset[old * 3 + 2];
+    for (int k = tid; k < n; k += stride) {
+      float x2, y2, z2;
+      x2 = dataset[k * 3 + 0];
+      y2 = dataset[k * 3 + 1];
+      z2 = dataset[k * 3 + 2];
+      float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
+      if (mag <= 1e-3) continue;
+
+      float d =
+          (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
+
+      float d2 = min(d, temp[k]);
+      temp[k] = d2;
+      besti = d2 > best ? k : besti;
+      best = d2 > best ? d2 : best;
+    }
+    dists[tid] = best;
+    dists_i[tid] = besti;
+    __syncthreads();
+
+    if (block_size >= 512) {
+      if (tid < 256) {
+        __update(dists, dists_i, tid, tid + 256);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 256) {
+      if (tid < 128) {
+        __update(dists, dists_i, tid, tid + 128);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 128) {
+      if (tid < 64) {
+        __update(dists, dists_i, tid, tid + 64);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 64) {
+      if (tid < 32) {
+        __update(dists, dists_i, tid, tid + 32);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 32) {
+      if (tid < 16) {
+        __update(dists, dists_i, tid, tid + 16);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 16) {
+      if (tid < 8) {
+        __update(dists, dists_i, tid, tid + 8);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 8) {
+      if (tid < 4) {
+        __update(dists, dists_i, tid, tid + 4);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 4) {
+      if (tid < 2) {
+        __update(dists, dists_i, tid, tid + 2);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 2) {
+      if (tid < 1) {
+        __update(dists, dists_i, tid, tid + 1);
+      }
+      __syncthreads();
+    }
+
+    old = dists_i[0];
+    if (tid == 0) idxs[j] = old;
+  }
+}
+
+void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
+                                            const float *dataset, float *temp,
+                                            int *idxs) {
+  unsigned int n_threads = opt_n_threads(n);
+
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  switch (n_threads) {
+    case 512:
+      furthest_point_sampling_kernel<512>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 256:
+      furthest_point_sampling_kernel<256>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 128:
+      furthest_point_sampling_kernel<128>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 64:
+      furthest_point_sampling_kernel<64>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 32:
+      furthest_point_sampling_kernel<32>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 16:
+      furthest_point_sampling_kernel<16>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 8:
+      furthest_point_sampling_kernel<8>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 4:
+      furthest_point_sampling_kernel<4>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 2:
+      furthest_point_sampling_kernel<2>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 1:
+      furthest_point_sampling_kernel<1>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    default:
+      furthest_point_sampling_kernel<512>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+  }
+
+  CUDA_CHECK_ERRORS();
+}
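The kernel above parallelizes the classic greedy farthest-point loop (each iteration keeps, per point, its distance to the nearest already-selected point, then picks the argmax via the shared-memory reduction). A compact PyTorch reference of the same loop, for checking results on small inputs; one visible difference is that the CUDA kernel additionally skips points whose squared norm is below 1e-3, treating them as padding:

```python
import torch

def furthest_point_sample_ref(xyz, npoint):
    """xyz: (B, N, 3) -> (B, npoint) indices; mirrors the kernel's greedy loop."""
    B, N, _ = xyz.shape
    inds = torch.zeros(B, npoint, dtype=torch.long, device=xyz.device)
    dist = torch.full((B, N), 1e10, device=xyz.device)
    farthest = torch.zeros(B, dtype=torch.long, device=xyz.device)
    for j in range(npoint):
        inds[:, j] = farthest
        centroid = xyz[torch.arange(B), farthest].unsqueeze(1)  # (B, 1, 3)
        dist = torch.minimum(dist, ((xyz - centroid) ** 2).sum(-1))
        farthest = dist.argmax(-1)  # next point: farthest from the selected set
    return inds
```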
+""" +import os +import sys + +import torch +import torch.nn as nn +import torch.nn.functional as F + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +from typing import List + +import pointnet2_utils +import pytorch_utils as pt_utils + + +class _PointnetSAModuleBase(nn.Module): + + def __init__(self): + super().__init__() + self.npoint = None + self.groupers = None + self.mlps = None + + def forward(self, + xyz: torch.Tensor, + features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor): + r""" + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor of the xyz coordinates of the features + features : torch.Tensor + (B, N, C) tensor of the descriptors of the the features + + Returns + ------- + new_xyz : torch.Tensor + (B, npoint, 3) tensor of the new features' xyz + new_features : torch.Tensor + (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors + """ + + new_features_list = [] + + xyz_flipped = xyz.transpose(1, 2).contiguous() + new_xyz = pointnet2_utils.gather_operation( + xyz_flipped, pointnet2_utils.furthest_point_sample( + xyz, self.npoint)).transpose( + 1, 2).contiguous() if self.npoint is not None else None + + for i in range(len(self.groupers)): + new_features = self.groupers[i]( + xyz, new_xyz, features) # (B, C, npoint, nsample) + + new_features = self.mlps[i]( + new_features) # (B, mlp[-1], npoint, nsample) + new_features = F.max_pool2d(new_features, + kernel_size=[ + 1, new_features.size(3) + ]) # (B, mlp[-1], npoint, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], npoint) + + new_features_list.append(new_features) + + return new_xyz, torch.cat(new_features_list, dim=1) + + +class PointnetSAModuleMSG(_PointnetSAModuleBase): + r"""Pointnet set abstrction layer with multiscale grouping + + Parameters + ---------- + npoint : int + Number of features + radii : list of float32 + list of radii to group with + nsamples : list of int32 + Number of samples in each ball query + mlps : list of list of int32 + Spec of the pointnet before the global max_pool for each scale + bn : bool + Use batchnorm + """ + + def __init__(self, + *, + npoint: int, + radii: List[float], + nsamples: List[int], + mlps: List[List[int]], + bn: bool = True, + use_xyz: bool = True, + sample_uniformly: bool = False): + super().__init__() + + assert len(radii) == len(nsamples) == len(mlps) + + self.npoint = npoint + self.groupers = nn.ModuleList() + self.mlps = nn.ModuleList() + for i in range(len(radii)): + radius = radii[i] + nsample = nsamples[i] + self.groupers.append( + pointnet2_utils.QueryAndGroup(radius, + nsample, + use_xyz=use_xyz, + sample_uniformly=sample_uniformly + ) + if npoint is not None else pointnet2_utils.GroupAll(use_xyz)) + mlp_spec = mlps[i] + if use_xyz: + mlp_spec[0] += 3 + + self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn)) + + +class PointnetSAModule(PointnetSAModuleMSG): + r"""Pointnet set abstrction layer + + Parameters + ---------- + npoint : int + Number of features + radius : float + Radius of ball + nsample : int + Number of samples in the ball query + mlp : list + Spec of the pointnet before the global max_pool + bn : bool + Use batchnorm + """ + + def __init__(self, + *, + mlp: List[int], + npoint: int = None, + radius: float = None, + nsample: int = None, + bn: bool = True, + use_xyz: bool = True): + super().__init__(mlps=[mlp], + npoint=npoint, + radii=[radius], + nsamples=[nsample], + bn=bn, + use_xyz=use_xyz) + + +class PointnetSAModuleVotes(nn.Module): + """Modified based on 
+
+
+class PointnetSAModuleVotes(nn.Module):
+    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG with
+    extra support for returning point indices for getting their GT votes."""
+
+    def __init__(
+            self,
+            *,
+            mlp: List[int],
+            npoint: int = None,
+            radius: float = None,
+            nsample: int = None,
+            bn: bool = True,
+            use_xyz: bool = True,
+            pooling: str = 'max',
+            sigma: float = None,  # for RBF pooling
+            normalize_xyz: bool = False,  # normalize local XYZ with radius
+            sample_uniformly: bool = False,
+            ret_unique_cnt: bool = False):
+        super().__init__()
+        self.npoint = npoint
+        self.radius = radius
+        self.nsample = nsample
+        self.pooling = pooling
+        self.mlp_module = None
+        self.use_xyz = use_xyz
+        self.sigma = sigma
+        if self.sigma is None:
+            self.sigma = self.radius / 2
+        self.normalize_xyz = normalize_xyz
+        self.ret_unique_cnt = ret_unique_cnt
+
+        if npoint is not None:
+            self.grouper = pointnet2_utils.QueryAndGroup(
+                radius,
+                nsample,
+                use_xyz=use_xyz,
+                ret_grouped_xyz=True,
+                normalize_xyz=normalize_xyz,
+                sample_uniformly=sample_uniformly,
+                ret_unique_cnt=ret_unique_cnt)
+        else:
+            self.grouper = pointnet2_utils.GroupAll(use_xyz,
+                                                    ret_grouped_xyz=True)
+
+        mlp_spec = mlp
+        if use_xyz and len(mlp_spec) > 0:
+            mlp_spec[0] += 3
+        self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
+
+    def forward(self,
+                xyz: torch.Tensor,
+                features: torch.Tensor = None,
+                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            (B, N, 3) tensor of the xyz coordinates of the features
+        features : torch.Tensor
+            (B, C, N) tensor of the descriptors of the features
+        inds : torch.Tensor
+            (B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
+
+        Returns
+        -------
+        new_xyz : torch.Tensor
+            (B, npoint, 3) tensor of the new features' xyz
+        new_features : torch.Tensor
+            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
+        inds: torch.Tensor
+            (B, npoint) tensor of the inds
+        """
+
+        xyz_flipped = xyz.transpose(1, 2).contiguous()
+        if inds is None:
+            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
+        else:
+            assert (inds.shape[1] == self.npoint)
+        new_xyz = pointnet2_utils.gather_operation(
+            xyz_flipped, inds).transpose(
+                1, 2).contiguous() if self.npoint is not None else None
+
+        if not self.ret_unique_cnt:
+            grouped_features, grouped_xyz = self.grouper(
+                xyz, new_xyz, features)  # (B, C, npoint, nsample)
+        else:
+            grouped_features, grouped_xyz, unique_cnt = self.grouper(
+                xyz, new_xyz, features
+            )  # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
+
+        new_features = self.mlp_module(
+            grouped_features)  # (B, mlp[-1], npoint, nsample)
+        if self.pooling == 'max':
+            new_features = F.max_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+        elif self.pooling == 'avg':
+            new_features = F.avg_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+        elif self.pooling == 'rbf':
+            # Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
+            # Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
+            rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1, keepdim=False) /
+                            (self.sigma**2) / 2)  # (B, npoint, nsample)
+            new_features = torch.sum(
+                new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(
+                    self.nsample)  # (B, mlp[-1], npoint, 1)
+        new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
+
+        if not self.ret_unique_cnt:
+            return new_xyz, new_features, inds
+        else:
+            return new_xyz, new_features, inds, unique_cnt
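Sketch of the vote-oriented variant in isolation; the extra `inds` output ties each abstracted feature back to its source point, which is what vote supervision needs (parameter values here are illustrative):

```python
import torch
from pointnet2_modules import PointnetSAModuleVotes

sa_votes = PointnetSAModuleVotes(mlp=[3, 64], npoint=256, radius=0.3,
                                 nsample=32, normalize_xyz=True).cuda()
xyz = torch.rand(2, 2048, 3).cuda()   # (B, N, 3)
feats = torch.rand(2, 3, 2048).cuda() # (B, C, N)
new_xyz, new_feats, inds = sa_votes(xyz, feats)  # inds: (B, 256) sampled ids
```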
+
+
+class PointnetSAModuleMSGVotes(nn.Module):
+    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG with
+    extra support for returning point indices for getting their GT votes."""
+
+    def __init__(self,
+                 *,
+                 mlps: List[List[int]],
+                 npoint: int,
+                 radii: List[float],
+                 nsamples: List[int],
+                 bn: bool = True,
+                 use_xyz: bool = True,
+                 sample_uniformly: bool = False):
+        super().__init__()
+
+        assert (len(mlps) == len(nsamples) == len(radii))
+
+        self.npoint = npoint
+        self.groupers = nn.ModuleList()
+        self.mlps = nn.ModuleList()
+        for i in range(len(radii)):
+            radius = radii[i]
+            nsample = nsamples[i]
+            self.groupers.append(
+                pointnet2_utils.QueryAndGroup(radius,
+                                              nsample,
+                                              use_xyz=use_xyz,
+                                              sample_uniformly=sample_uniformly
+                                              )
+                if npoint is not None else pointnet2_utils.GroupAll(use_xyz))
+            mlp_spec = mlps[i]
+            if use_xyz:
+                mlp_spec[0] += 3
+
+            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
+
+    def forward(self,
+                xyz: torch.Tensor,
+                features: torch.Tensor = None,
+                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            (B, N, 3) tensor of the xyz coordinates of the features
+        features : torch.Tensor
+            (B, C, N) tensor of the descriptors of the features
+        inds : torch.Tensor
+            (B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
+
+        Returns
+        -------
+        new_xyz : torch.Tensor
+            (B, npoint, 3) tensor of the new features' xyz
+        new_features : torch.Tensor
+            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
+        inds: torch.Tensor
+            (B, npoint) tensor of the inds
+        """
+        new_features_list = []
+
+        xyz_flipped = xyz.transpose(1, 2).contiguous()
+        if inds is None:
+            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
+        new_xyz = pointnet2_utils.gather_operation(
+            xyz_flipped, inds).transpose(
+                1, 2).contiguous() if self.npoint is not None else None
+
+        for i in range(len(self.groupers)):
+            new_features = self.groupers[i](
+                xyz, new_xyz, features)  # (B, C, npoint, nsample)
+            new_features = self.mlps[i](
+                new_features)  # (B, mlp[-1], npoint, nsample)
+            new_features = F.max_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
+
+            new_features_list.append(new_features)
+
+        return new_xyz, torch.cat(new_features_list, dim=1), inds
+
+
+class PointnetFPModule(nn.Module):
+    r"""Propagates the features of one set to another
+
+    Parameters
+    ----------
+    mlp : list
+        Pointnet module parameters
+    bn : bool
+        Use batchnorm
+    """
+
+    def __init__(self, *, mlp: List[int], bn: bool = True):
+        super().__init__()
+        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
+
+    def forward(self, unknown: torch.Tensor, known: torch.Tensor,
+                unknow_feats: torch.Tensor,
+                known_feats: torch.Tensor) -> torch.Tensor:
+        r"""
+        Parameters
+        ----------
+        unknown : torch.Tensor
+            (B, n, 3) tensor of the xyz positions of the unknown features
+        known : torch.Tensor
+            (B, m, 3) tensor of the xyz positions of the known features
+        unknow_feats : torch.Tensor
+            (B, C1, n) tensor of the features to be propagated to
+        known_feats : torch.Tensor
+            (B, C2, m) tensor of features to be propagated
+
+        Returns
+        -------
+        new_features : torch.Tensor
+            (B, mlp[-1], n) tensor of the features of the unknown features
+        """
+
+        if known is not None:
+            dist, idx = pointnet2_utils.three_nn(unknown, known)
+            dist_recip = 1.0 / (dist + 1e-8)
+            norm = torch.sum(dist_recip, dim=2, keepdim=True)
+            weight = dist_recip / norm
+
+            interpolated_feats = pointnet2_utils.three_interpolate(
+                known_feats, idx, weight)
+        else:
+            interpolated_feats = known_feats.expand(*known_feats.size()[0:2],
+                                                    unknown.size(1))
+
+        if unknow_feats is not None:
+            new_features = torch.cat([interpolated_feats, unknow_feats],
+                                     dim=1)  # (B, C2 + C1, n)
+        else:
+            new_features = interpolated_feats
+
+        new_features = new_features.unsqueeze(-1)
+        new_features = self.mlp(new_features)
+
+        return new_features.squeeze(-1)
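The upsampling step inside this module is inverse-distance interpolation over the three nearest known points. The same computation in isolation, with illustrative shapes:

```python
import torch
import pointnet2_utils

unknown_xyz = torch.rand(2, 1024, 3).cuda()   # dense positions (B, n, 3)
known_xyz = torch.rand(2, 256, 3).cuda()      # sparse positions (B, m, 3)
known_feats = torch.rand(2, 128, 256).cuda()  # sparse features (B, C2, m)

dist, idx = pointnet2_utils.three_nn(unknown_xyz, known_xyz)  # (B, n, 3) each
w = 1.0 / (dist + 1e-8)
w = w / w.sum(dim=2, keepdim=True)            # normalized inverse-distance weights
up = pointnet2_utils.three_interpolate(known_feats, idx, w)   # (B, C2, n)
```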
+
+
+class PointnetLFPModuleMSG(nn.Module):
+    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG;
+    learnable feature propagation layer."""
+
+    def __init__(self,
+                 *,
+                 mlps: List[List[int]],
+                 radii: List[float],
+                 nsamples: List[int],
+                 post_mlp: List[int],
+                 bn: bool = True,
+                 use_xyz: bool = True,
+                 sample_uniformly: bool = False):
+        super().__init__()
+
+        assert (len(mlps) == len(nsamples) == len(radii))
+
+        self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
+
+        self.groupers = nn.ModuleList()
+        self.mlps = nn.ModuleList()
+        for i in range(len(radii)):
+            radius = radii[i]
+            nsample = nsamples[i]
+            self.groupers.append(
+                pointnet2_utils.QueryAndGroup(
+                    radius,
+                    nsample,
+                    use_xyz=use_xyz,
+                    sample_uniformly=sample_uniformly))
+            mlp_spec = mlps[i]
+            if use_xyz:
+                mlp_spec[0] += 3
+
+            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
+
+    def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
+                features2: torch.Tensor,
+                features1: torch.Tensor) -> torch.Tensor:
+        r""" Propagate features from xyz1 to xyz2.
+        Parameters
+        ----------
+        xyz2 : torch.Tensor
+            (B, N2, 3) tensor of the xyz coordinates of the features
+        xyz1 : torch.Tensor
+            (B, N1, 3) tensor of the xyz coordinates of the features
+        features2 : torch.Tensor
+            (B, C2, N2) tensor of the descriptors of the features
+        features1 : torch.Tensor
+            (B, C1, N1) tensor of the descriptors of the features
+
+        Returns
+        -------
+        new_features1 : torch.Tensor
+            (B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors
+        """
+        new_features_list = []
+
+        for i in range(len(self.groupers)):
+            new_features = self.groupers[i](xyz1, xyz2,
+                                            features1)  # (B, C1, N2, nsample)
+            new_features = self.mlps[i](
+                new_features)  # (B, mlp[-1], N2, nsample)
+            new_features = F.max_pool2d(new_features,
+                                        kernel_size=[1,
+                                                     new_features.size(3)
+                                                     ])  # (B, mlp[-1], N2, 1)
+            new_features = new_features.squeeze(-1)  # (B, mlp[-1], N2)
+
+            if features2 is not None:
+                new_features = torch.cat([new_features, features2],
+                                         dim=1)  # (B, mlp[-1] + C2, N2)
+
+            new_features = new_features.unsqueeze(-1)
+            new_features = self.post_mlp(new_features)
+
+            new_features_list.append(new_features)
+
+        return torch.cat(new_features_list, dim=1).squeeze(-1)
+
+
+if __name__ == '__main__':
+    from torch.autograd import Variable
+    torch.manual_seed(1)
+    torch.cuda.manual_seed_all(1)
+    xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True)
+    xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True)
+
+    test_module = PointnetSAModuleMSG(npoint=2,
+                                      radii=[5.0, 10.0],
+                                      nsamples=[6, 3],
+                                      mlps=[[9, 3], [9, 6]])
+    test_module.cuda()
+    print(test_module(xyz, xyz_feats))
+
+    for _ in range(1):
+        _, new_features = test_module(xyz, xyz_feats)
+        new_features.backward(
+            torch.cuda.FloatTensor(*new_features.size()).fill_(1))
+        print(new_features)
+        print(xyz.grad)
diff --git a/models/LL3DA/third_party/pointnet2/pointnet2_test.py b/models/LL3DA/third_party/pointnet2/pointnet2_test.py
new file mode 100644
index 0000000..af2f21f
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/pointnet2_test.py
@@ -0,0 +1,35 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""Testing customized ops."""
+
+import os
+import sys
+
+import numpy as np
+import torch
+from torch.autograd import gradcheck
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+import pointnet2_utils
+
+
+def test_interpolation_grad():
+    batch_size = 1
+    feat_dim = 2
+    m = 4
+    feats = torch.randn(batch_size, feat_dim, m,
+                        requires_grad=True).float().cuda()
+
+    def interpolate_func(inputs):
+        idx = torch.from_numpy(np.array([[[0, 1, 2], [1, 2, 3]]])).int().cuda()
+        weight = torch.from_numpy(np.array([[[1, 1, 1], [2, 2,
+                                                         2]]])).float().cuda()
+        interpolated_feats = pointnet2_utils.three_interpolate(
+            inputs, idx, weight)
+        return interpolated_feats
+
+    assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1))
+
+
+if __name__ == '__main__':
+    test_interpolation_grad()
diff --git a/models/LL3DA/third_party/pointnet2/pointnet2_utils.py b/models/LL3DA/third_party/pointnet2/pointnet2_utils.py
new file mode 100644
index 0000000..b9b0d5b
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/pointnet2_utils.py
@@ -0,0 +1,433 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch."""
+from __future__ import (absolute_import, division, print_function,
+                        unicode_literals, with_statement)
+
+import sys
+
+import pytorch_utils as pt_utils
+import torch
+import torch.nn as nn
+from torch.autograd import Function
+
+try:
+    import builtins
+except:
+    import __builtin__ as builtins
+
+try:
+    import pointnet2._ext as _ext
+except ImportError:
+    if not getattr(builtins, '__POINTNET2_SETUP__', False):
+        raise ImportError(
+            'Could not import _ext module.\n'
+            'Please see the setup instructions in the README: '
+            'https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst'
+        )
+
+if False:
+    # Workaround for type hints without depending on the `typing` module
+    from typing import *
+
+
+class RandomDropout(nn.Module):
+
+    def __init__(self, p=0.5, inplace=False):
+        super(RandomDropout, self).__init__()
+        self.p = p
+        self.inplace = inplace
+
+    def forward(self, X):
+        theta = torch.Tensor(1).uniform_(0, self.p)[0]
+        return pt_utils.feature_dropout_no_scaling(X, theta, self.train,
+                                                   self.inplace)
+
+
+class FurthestPointSampling(Function):
+
+    @staticmethod
+    def forward(ctx, xyz, npoint):
+        # type: (Any, torch.Tensor, int) -> torch.Tensor
+        r"""
+        Uses iterative furthest point sampling to select a set of npoint features that have the largest
+        minimum distance
+
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            (B, N, 3) tensor where N > npoint
+        npoint : int32
+            number of features in the sampled set
+
+        Returns
+        -------
+        torch.Tensor
+            (B, npoint) tensor containing the set
+        """
+        fps_inds = _ext.furthest_point_sampling(xyz, npoint)
+        ctx.mark_non_differentiable(fps_inds)
+        return fps_inds
+
+    @staticmethod
+    def backward(xyz, a=None):
+        return None, None
+
+
+furthest_point_sample = FurthestPointSampling.apply
+
+
+class GatherOperation(Function):
+
+    @staticmethod
+    def forward(ctx, features, idx):
+        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
+        r"""
+
+        Parameters
+        ----------
+        features : torch.Tensor
+            (B, C, N) tensor
+
+        idx : torch.Tensor
+            (B, npoint) tensor of the features to gather
+
+        Returns
+        -------
+        torch.Tensor
+            (B, C, npoint) tensor
+        """
+
+        _, C, N = features.size()
+
+        ctx.for_backwards = (idx, C, N)
+
+        return _ext.gather_points(features, idx)
+
+    @staticmethod
+    def backward(ctx, grad_out):
+        idx, C, N = ctx.for_backwards
+
+        grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N)
+        return grad_features, None
+
+
+gather_operation = GatherOperation.apply
+
+
+class ThreeNN(Function):
+
+    @staticmethod
+    def forward(ctx, unknown, known):
+        # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
+        r"""
+        Find the three nearest neighbors of unknown in known
+        Parameters
+        ----------
+        unknown : torch.Tensor
+            (B, n, 3) tensor of unknown features
+        known : torch.Tensor
+            (B, m, 3) tensor of known features
+
+        Returns
+        -------
+        dist : torch.Tensor
+            (B, n, 3) l2 distance to the three nearest neighbors
+        idx : torch.Tensor
+            (B, n, 3) index of 3 nearest neighbors
+        """
+        dist2, idx = _ext.three_nn(unknown, known)
+
+        return torch.sqrt(dist2), idx
+
+    @staticmethod
+    def backward(ctx, a=None, b=None):
+        return None, None
+
+
+three_nn = ThreeNN.apply
+
+
+class ThreeInterpolate(Function):
+
+    @staticmethod
+    def forward(ctx, features, idx, weight):
+        # type: (Any, torch.Tensor, torch.Tensor, torch.Tensor) -> torch.Tensor
+        r"""
+        Performs weighted linear interpolation on 3 features
+        Parameters
+        ----------
+        features : torch.Tensor
+            (B, c, m) Features descriptors to be interpolated from
+        idx : torch.Tensor
+            (B, n, 3) three nearest neighbors of the target features in features
+        weight : torch.Tensor
+            (B, n, 3) weights
+
+        Returns
+        -------
+        torch.Tensor
+            (B, c, n) tensor of the interpolated features
+        """
+        B, c, m = features.size()
+        n = idx.size(1)
+
+        ctx.three_interpolate_for_backward = (idx, weight, m)
+
+        return _ext.three_interpolate(features, idx, weight)
+
+    @staticmethod
+    def backward(ctx, grad_out):
+        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
+        r"""
+        Parameters
+        ----------
+        grad_out : torch.Tensor
+            (B, c, n) tensor with gradients of outputs
+
+        Returns
+        -------
+        grad_features : torch.Tensor
+            (B, c, m) tensor with gradients of features
+
+        None
+
+        None
+        """
+        idx, weight, m = ctx.three_interpolate_for_backward
+
+        grad_features = _ext.three_interpolate_grad(grad_out.contiguous(), idx,
+                                                    weight, m)
+
+        return grad_features, None, None
+
+
+three_interpolate = ThreeInterpolate.apply
+
+
+class GroupingOperation(Function):
+
+    @staticmethod
+    def forward(ctx, features, idx):
+        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
+        r"""
+
+        Parameters
+        ----------
+        features : torch.Tensor
+            (B, C, N) tensor of features to group
+        idx : torch.Tensor
+            (B, npoint, nsample) tensor containing the indices of features to group with
+
+        Returns
+        -------
+        torch.Tensor
+            (B, C, npoint, nsample) tensor
+        """
+        B, nfeatures, nsample = idx.size()
+        _, C, N = features.size()
+
+        ctx.for_backwards = (idx, N)
+
+        return _ext.group_points(features, idx)
+
+    @staticmethod
+    def backward(ctx, grad_out):
+        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
+        r"""
+
+        Parameters
+        ----------
+        grad_out : torch.Tensor
+            (B, C, npoint, nsample) tensor of the gradients of the output from forward
+
+        Returns
+        -------
+        torch.Tensor
+            (B, C, N) gradient of the features
+        None
+        """
+        idx, N = ctx.for_backwards
+
+        grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
+
+        return grad_features, None
+
+
+grouping_operation = GroupingOperation.apply
torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + radius : float + radius of the balls + nsample : int + maximum number of features in the balls + xyz : torch.Tensor + (B, N, 3) xyz coordinates of the features + new_xyz : torch.Tensor + (B, npoint, 3) centers of the ball query + + Returns + ------- + torch.Tensor + (B, npoint, nsample) tensor with the indicies of the features that form the query balls + """ + inds = _ext.ball_query(new_xyz, xyz, radius, nsample) + ctx.mark_non_differentiable(inds) + return inds + + @staticmethod + def backward(ctx, a=None): + return None, None, None, None + + +ball_query = BallQuery.apply + + +class QueryAndGroup(nn.Module): + r""" + Groups with a ball query of radius + + Parameters + --------- + radius : float32 + Radius of ball + nsample : int32 + Maximum number of features to gather in the ball + """ + + def __init__(self, + radius, + nsample, + use_xyz=True, + ret_grouped_xyz=False, + normalize_xyz=False, + sample_uniformly=False, + ret_unique_cnt=False): + # type: (QueryAndGroup, float, int, bool) -> None + super(QueryAndGroup, self).__init__() + self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz + self.ret_grouped_xyz = ret_grouped_xyz + self.normalize_xyz = normalize_xyz + self.sample_uniformly = sample_uniformly + self.ret_unique_cnt = ret_unique_cnt + if self.ret_unique_cnt: + assert (self.sample_uniformly) + + def forward(self, xyz, new_xyz, features=None): + # type: (QueryAndGroup, torch.Tensor. torch.Tensor, torch.Tensor) -> Tuple[Torch.Tensor] + r""" + Parameters + ---------- + xyz : torch.Tensor + xyz coordinates of the features (B, N, 3) + new_xyz : torch.Tensor + centriods (B, npoint, 3) + features : torch.Tensor + Descriptors of the features (B, C, N) + + Returns + ------- + new_features : torch.Tensor + (B, 3 + C, npoint, nsample) tensor + """ + idx = ball_query(self.radius, self.nsample, xyz, new_xyz) + + if self.sample_uniformly: + unique_cnt = torch.zeros((idx.shape[0], idx.shape[1])) + for i_batch in range(idx.shape[0]): + for i_region in range(idx.shape[1]): + unique_ind = torch.unique(idx[i_batch, i_region, :]) + num_unique = unique_ind.shape[0] + unique_cnt[i_batch, i_region] = num_unique + sample_ind = torch.randint(0, + num_unique, + (self.nsample - num_unique, ), + dtype=torch.long) + all_ind = torch.cat((unique_ind, unique_ind[sample_ind])) + idx[i_batch, i_region, :] = all_ind + + xyz_trans = xyz.transpose(1, 2).contiguous() + grouped_xyz = grouping_operation(xyz_trans, + idx) # (B, 3, npoint, nsample) + grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1) + if self.normalize_xyz: + grouped_xyz /= self.radius + + if features is not None: + grouped_features = grouping_operation(features, idx) + if self.use_xyz: + new_features = torch.cat([grouped_xyz, grouped_features], + dim=1) # (B, C + 3, npoint, nsample) + else: + new_features = grouped_features + else: + assert (self.use_xyz + ), 'Cannot have not features and not use xyz as a feature!' 
+
+
+class GroupAll(nn.Module):
+    r"""
+    Groups all features
+
+    Parameters
+    ---------
+    """
+
+    def __init__(self, use_xyz=True, ret_grouped_xyz=False):
+        # type: (GroupAll, bool, bool) -> None
+        super(GroupAll, self).__init__()
+        self.use_xyz = use_xyz
+        self.ret_grouped_xyz = ret_grouped_xyz
+
+    def forward(self, xyz, new_xyz, features=None):
+        # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            xyz coordinates of the features (B, N, 3)
+        new_xyz : torch.Tensor
+            Ignored
+        features : torch.Tensor
+            Descriptors of the features (B, C, N)
+
+        Returns
+        -------
+        new_features : torch.Tensor
+            (B, C + 3, 1, N) tensor
+        """
+
+        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
+        if features is not None:
+            grouped_features = features.unsqueeze(2)
+            if self.use_xyz:
+                new_features = torch.cat([grouped_xyz, grouped_features],
+                                         dim=1)  # (B, 3 + C, 1, N)
+            else:
+                new_features = grouped_features
+        else:
+            new_features = grouped_xyz
+
+        if self.ret_grouped_xyz:
+            return new_features, grouped_xyz
+        else:
+            return new_features
diff --git a/models/LL3DA/third_party/pointnet2/pytorch_utils.py b/models/LL3DA/third_party/pointnet2/pytorch_utils.py
new file mode 100644
index 0000000..bd5805e
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/pytorch_utils.py
@@ -0,0 +1,270 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch."""
+from typing import List, Tuple
+
+import torch
+import torch.nn as nn
+
+
+class SharedMLP(nn.Sequential):
+
+    def __init__(self,
+                 args: List[int],
+                 *,
+                 bn: bool = False,
+                 activation=nn.ReLU(inplace=True),
+                 preact: bool = False,
+                 first: bool = False,
+                 name: str = ''):
+        super().__init__()
+
+        for i in range(len(args) - 1):
+            self.add_module(
+                name + 'layer{}'.format(i),
+                Conv2d(args[i],
+                       args[i + 1],
+                       bn=(not first or not preact or (i != 0)) and bn,
+                       activation=activation if
+                       (not first or not preact or (i != 0)) else None,
+                       preact=preact))
+
+
+class _BNBase(nn.Sequential):
+
+    def __init__(self, in_size, batch_norm=None, name=''):
+        super().__init__()
+        self.add_module(name + 'bn', batch_norm(in_size))
+
+        nn.init.constant_(self[0].weight, 1.0)
+        nn.init.constant_(self[0].bias, 0)
+
+
+class BatchNorm1d(_BNBase):
+
+    def __init__(self, in_size: int, *, name: str = ''):
+        super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name)
+
+
+class BatchNorm2d(_BNBase):
+
+    def __init__(self, in_size: int, name: str = ''):
+        super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name)
+
+
+class BatchNorm3d(_BNBase):
+
+    def __init__(self, in_size: int, name: str = ''):
+        super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name)
+
+
+class _ConvBase(nn.Sequential):
+
+    def __init__(self,
+                 in_size,
+                 out_size,
+                 kernel_size,
+                 stride,
+                 padding,
+                 activation,
+                 bn,
+                 init,
+                 conv=None,
+                 batch_norm=None,
+                 bias=True,
+                 preact=False,
+                 name=''):
+        super().__init__()
+
+        bias = bias and (not bn)
+        conv_unit = conv(in_size,
+                         out_size,
+                         kernel_size=kernel_size,
+                         stride=stride,
+                         padding=padding,
+                         bias=bias)
+        init(conv_unit.weight)
+        if bias:
+            nn.init.constant_(conv_unit.bias, 0)
+
+        if bn:
+            if not preact:
+                bn_unit = batch_norm(out_size)
+            else:
+                bn_unit = batch_norm(in_size)
+
+        if preact:
+            if bn:
+                self.add_module(name + 'bn', bn_unit)
+
+            if activation is not None:
+                self.add_module(name + 'activation', activation)
+
+        self.add_module(name + 'conv', conv_unit)
+
+        if not preact:
+            if bn:
+                self.add_module(name + 'bn', bn_unit)
+
+            if activation is not None:
+                self.add_module(name + 'activation', activation)
+
+
+class Conv1d(_ConvBase):
+
+    def __init__(self,
+                 in_size: int,
+                 out_size: int,
+                 *,
+                 kernel_size: int = 1,
+                 stride: int = 1,
+                 padding: int = 0,
+                 activation=nn.ReLU(inplace=True),
+                 bn: bool = False,
+                 init=nn.init.kaiming_normal_,
+                 bias: bool = True,
+                 preact: bool = False,
+                 name: str = ''):
+        super().__init__(in_size,
+                         out_size,
+                         kernel_size,
+                         stride,
+                         padding,
+                         activation,
+                         bn,
+                         init,
+                         conv=nn.Conv1d,
+                         batch_norm=BatchNorm1d,
+                         bias=bias,
+                         preact=preact,
+                         name=name)
+
+
+class Conv2d(_ConvBase):
+
+    def __init__(self,
+                 in_size: int,
+                 out_size: int,
+                 *,
+                 kernel_size: Tuple[int, int] = (1, 1),
+                 stride: Tuple[int, int] = (1, 1),
+                 padding: Tuple[int, int] = (0, 0),
+                 activation=nn.ReLU(inplace=True),
+                 bn: bool = False,
+                 init=nn.init.kaiming_normal_,
+                 bias: bool = True,
+                 preact: bool = False,
+                 name: str = ''):
+        super().__init__(in_size,
+                         out_size,
+                         kernel_size,
+                         stride,
+                         padding,
+                         activation,
+                         bn,
+                         init,
+                         conv=nn.Conv2d,
+                         batch_norm=BatchNorm2d,
+                         bias=bias,
+                         preact=preact,
+                         name=name)
+
+
+class Conv3d(_ConvBase):
+
+    def __init__(self,
+                 in_size: int,
+                 out_size: int,
+                 *,
+                 kernel_size: Tuple[int, int, int] = (1, 1, 1),
+                 stride: Tuple[int, int, int] = (1, 1, 1),
+                 padding: Tuple[int, int, int] = (0, 0, 0),
+                 activation=nn.ReLU(inplace=True),
+                 bn: bool = False,
+                 init=nn.init.kaiming_normal_,
+                 bias: bool = True,
+                 preact: bool = False,
+                 name: str = ''):
+        super().__init__(in_size,
+                         out_size,
+                         kernel_size,
+                         stride,
+                         padding,
+                         activation,
+                         bn,
+                         init,
+                         conv=nn.Conv3d,
+                         batch_norm=BatchNorm3d,
+                         bias=bias,
+                         preact=preact,
+                         name=name)
+
+
+class FC(nn.Sequential):
+
+    def __init__(self,
+                 in_size: int,
+                 out_size: int,
+                 *,
+                 activation=nn.ReLU(inplace=True),
+                 bn: bool = False,
+                 init=None,
+                 preact: bool = False,
+                 name: str = ''):
+        super().__init__()
+
+        fc = nn.Linear(in_size, out_size, bias=not bn)
+        if init is not None:
+            init(fc.weight)
+        if not bn:
+            nn.init.constant_(fc.bias, 0)
+
+        if preact:
+            if bn:
+                self.add_module(name + 'bn', BatchNorm1d(in_size))
+
+            if activation is not None:
+                self.add_module(name + 'activation', activation)
+
+        self.add_module(name + 'fc', fc)
+
+        if not preact:
+            if bn:
+                self.add_module(name + 'bn', BatchNorm1d(out_size))
+
+            if activation is not None:
+                self.add_module(name + 'activation', activation)
+
+
+def set_bn_momentum_default(bn_momentum):
+
+    def fn(m):
+        if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)):
+            m.momentum = bn_momentum
+
+    return fn
+
+
+class BNMomentumScheduler(object):
+
+    def __init__(self,
+                 model,
+                 bn_lambda,
+                 last_epoch=-1,
+                 setter=set_bn_momentum_default):
+        if not isinstance(model, nn.Module):
+            raise RuntimeError("Class '{}' is not a PyTorch nn Module".format(
+                type(model).__name__))
+
+        self.model = model
+        self.setter = setter
+        self.lmbd = bn_lambda
+
+        self.step(last_epoch + 1)
+        self.last_epoch = last_epoch
+
+    def step(self, epoch=None):
+        if epoch is None:
+            epoch = self.last_epoch + 1
+
+        self.last_epoch = epoch
+        self.model.apply(self.setter(self.lmbd(epoch)))
diff --git a/models/LL3DA/third_party/pointnet2/setup.py b/models/LL3DA/third_party/pointnet2/setup.py
new file mode 100644
index 0000000..b988143
--- /dev/null
+++ b/models/LL3DA/third_party/pointnet2/setup.py
@@ -0,0 +1,33 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+
+import glob
+import os.path as osp
+
+from setuptools import setup
+from torch.utils.cpp_extension import BuildExtension, CUDAExtension
+
+this_dir = osp.dirname(osp.abspath(__file__))
+
+_ext_src_root = '_ext_src'
+_ext_sources = glob.glob('{}/src/*.cpp'.format(_ext_src_root)) + glob.glob(
+    '{}/src/*.cu'.format(_ext_src_root))
+_ext_headers = glob.glob('{}/include/*'.format(_ext_src_root))
+
+setup(name='pointnet2',
+      ext_modules=[
+          CUDAExtension(
+              name='pointnet2._ext',
+              sources=_ext_sources,
+              extra_compile_args={
+                  'cxx':
+                  ['-O2', '-I{}'.format('{}/include'.format(_ext_src_root))],
+                  'nvcc':
+                  ['-O2', '-I{}'.format('{}/include'.format(_ext_src_root))],
+              },
+              include_dirs=[osp.join(this_dir, _ext_src_root, 'include')],
+          )
+      ],
+      cmdclass={'build_ext': BuildExtension})
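After running `python setup.py install` from `models/LL3DA/third_party/pointnet2/`, a quick smoke test confirms the extension built against the local CUDA toolchain (illustrative check, not part of the diff):

```python
import torch
import pointnet2._ext as _ext  # compiled by the setup.py above

xyz = torch.rand(1, 128, 3).cuda()
print(_ext.furthest_point_sampling(xyz, 16).shape)  # torch.Size([1, 16])
```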
diff --git a/models/LL3DA/utils/ap_calculator.py b/models/LL3DA/utils/ap_calculator.py
new file mode 100644
index 0000000..48f0997
--- /dev/null
+++ b/models/LL3DA/utils/ap_calculator.py
@@ -0,0 +1,421 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+"""Helper functions and class to calculate Average Precisions for 3D object
+detection."""
+import logging
+import os
+import sys
+from collections import OrderedDict
+
+import numpy as np
+import scipy.special as scipy_special
+import torch
+from utils.box_util import (extract_pc_in_box3d, flip_axis_to_camera_np,
+                            get_3d_box, get_3d_box_batch)
+from utils.eval_det import eval_det_multiprocessing, get_iou_obb
+from utils.nms import nms_2d_faster, nms_3d_faster, nms_3d_faster_samecls
+
+
+def flip_axis_to_depth(pc):
+    pc2 = np.copy(pc)
+    pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]]  # depth X,Y,Z = cam X,Z,-Y
+    pc2[..., 2] *= -1
+    return pc2
+
+
+def softmax(x):
+    """Numpy function for softmax."""
+    shape = x.shape
+    probs = np.exp(x - np.max(x, axis=len(shape) - 1, keepdims=True))
+    probs /= np.sum(probs, axis=len(shape) - 1, keepdims=True)
+    return probs
+
+
+# This is exactly the same as VoteNet so that we can compare evaluations.
+def parse_predictions(predicted_boxes, sem_cls_probs, objectness_probs,
+                      point_cloud, config_dict):
+    """Parse predictions to OBB parameters and suppress overlapping boxes.
+
+    Args:
+        predicted_boxes, sem_cls_probs, objectness_probs, point_cloud:
+            per-batch predicted box corners, class probabilities,
+            objectness probabilities and input points
+        config_dict: dict
+            {dataset_config, remove_empty_box, use_3d_nms, nms_iou,
+            use_old_type_nms, conf_thresh, per_class_proposal}
+
+    Returns:
+        batch_pred_map_cls: a list of len == batch size (BS)
+            [pred_list_i], i = 0, 1, ..., BS-1
+            where pred_list_i = [(pred_sem_cls, box_params, box_score)_j]
+            where j = 0, ..., num of valid detections - 1 from sample input i
+    """
+
+    sem_cls_probs = sem_cls_probs.detach().cpu().numpy()  # B,num_proposal,10
+    pred_sem_cls_prob = np.max(sem_cls_probs, -1)  # B,num_proposal
+    pred_sem_cls = np.argmax(sem_cls_probs, -1)
+    obj_prob = objectness_probs.detach().cpu().numpy()
+
+    pred_corners_3d_upright_camera = predicted_boxes.detach().cpu().numpy()
+
+    K = pred_corners_3d_upright_camera.shape[1]  # K==num_proposal
+    bsize = pred_corners_3d_upright_camera.shape[0]
+    nonempty_box_mask = np.ones((bsize, K))
+
+    if config_dict['remove_empty_box']:
+        # -------------------------------------
+        # Remove predicted boxes without any point within them.
+        batch_pc = point_cloud.cpu().numpy()[:, :, 0:3]  # B,N,3
+        for i in range(bsize):
+            pc = batch_pc[i, :, :]  # (N,3)
+            for j in range(K):
+                box3d = pred_corners_3d_upright_camera[i, j, :, :]  # (8,3)
+                box3d = flip_axis_to_depth(box3d)
+                pc_in_box, inds = extract_pc_in_box3d(pc, box3d)
+                if len(pc_in_box) < 5:
+                    nonempty_box_mask[i, j] = 0
+            if nonempty_box_mask[i].sum() == 0:
+                nonempty_box_mask[i, obj_prob[i].argmax()] = 1
+        # -------------------------------------
+
+    if 'no_nms' in config_dict and config_dict['no_nms']:
+        # pred_mask = np.ones((bsize, K))
+        pred_mask = nonempty_box_mask
+    elif not config_dict['use_3d_nms']:
+        # ---------- NMS input: pred_with_prob in (B,K,7) -----------
+        pred_mask = np.zeros((bsize, K))
+        for i in range(bsize):
+            boxes_2d_with_prob = np.zeros((K, 5))
+            for j in range(K):
+                boxes_2d_with_prob[j, 0] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_2d_with_prob[j, 2] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_2d_with_prob[j, 1] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_2d_with_prob[j, 3] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_2d_with_prob[j, 4] = obj_prob[i, j]
+            nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
+            assert len(nonempty_box_inds) > 0
+            pick = nms_2d_faster(
+                boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],
+                config_dict['nms_iou'],
+                config_dict['use_old_type_nms'],
+            )
+            assert len(pick) > 0
+            pred_mask[i, nonempty_box_inds[pick]] = 1
+        # ---------- NMS output: pred_mask in (B,K) -----------
+    elif config_dict['use_3d_nms'] and (not config_dict['cls_nms']):
+        # ---------- NMS input: pred_with_prob in (B,K,7) -----------
+        pred_mask = np.zeros((bsize, K))
+        for i in range(bsize):
+            boxes_3d_with_prob = np.zeros((K, 7))
+            for j in range(K):
+                boxes_3d_with_prob[j, 0] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_3d_with_prob[j, 1] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 1])
+                boxes_3d_with_prob[j, 2] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_3d_with_prob[j, 3] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_3d_with_prob[j, 4] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 1])
+                boxes_3d_with_prob[j, 5] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_3d_with_prob[j, 6] = obj_prob[i, j]
+            nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
+            assert len(nonempty_box_inds) > 0
+            pick = nms_3d_faster(
+                boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
+                config_dict['nms_iou'],
+                config_dict['use_old_type_nms'],
+            )
+            assert len(pick) > 0
+            pred_mask[i, nonempty_box_inds[pick]] = 1
+        # ---------- NMS output: pred_mask in (B,K) -----------
+    elif config_dict['use_3d_nms'] and config_dict['cls_nms']:
+        # ---------- NMS input: pred_with_prob in (B,K,8) -----------
+        pred_mask = np.zeros((bsize, K))
+        for i in range(bsize):
+            boxes_3d_with_prob = np.zeros((K, 8))
+            for j in range(K):
+                boxes_3d_with_prob[j, 0] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_3d_with_prob[j, 1] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 1])
+                boxes_3d_with_prob[j, 2] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_3d_with_prob[j, 3] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_3d_with_prob[j, 4] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 1])
+                boxes_3d_with_prob[j, 5] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_3d_with_prob[j, 6] = obj_prob[i, j]
+                boxes_3d_with_prob[j, 7] = pred_sem_cls[
+                    i,
+                    j]  # only suppress if the two boxes are of the same class!!
+            nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
+            assert len(nonempty_box_inds) > 0
+            pick = nms_3d_faster_samecls(
+                boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :],
+                config_dict['nms_iou'],
+                config_dict['use_old_type_nms'],
+            )
+            assert len(pick) > 0
+            pred_mask[i, nonempty_box_inds[pick]] = 1
+        # ---------- NMS output: pred_mask in (B,K) -----------
+
+    batch_pred_map_cls = (
+        []
+    )  # a list (len: batch_size) of list (len: num of predictions per sample) of tuples of pred_cls, pred_box and conf (0-1)
+    for i in range(bsize):
+        if config_dict['per_class_proposal']:
+            assert config_dict['use_cls_confidence_only'] is False
+            cur_list = []
+            for ii in range(config_dict['dataset_config'].num_semcls):
+                cur_list += [(
+                    ii,
+                    pred_corners_3d_upright_camera[i, j],
+                    sem_cls_probs[i, j, ii] * obj_prob[i, j],
+                ) for j in range(pred_corners_3d_upright_camera.shape[1])
+                             if pred_mask[i, j] == 1
+                             and obj_prob[i, j] > config_dict['conf_thresh']]
+            batch_pred_map_cls.append(cur_list)
+        elif config_dict['use_cls_confidence_only']:
+            batch_pred_map_cls.append([
+                (
+                    pred_sem_cls[i, j].item(),
+                    pred_corners_3d_upright_camera[i, j],
+                    sem_cls_probs[i, j, pred_sem_cls[i, j].item()],
+                ) for j in range(pred_corners_3d_upright_camera.shape[1])
+                if pred_mask[i, j] == 1
+                and obj_prob[i, j] > config_dict['conf_thresh']
+            ])
+        else:
+            batch_pred_map_cls.append([
+                (
+                    pred_sem_cls[i, j].item(),
+                    pred_corners_3d_upright_camera[i, j],
+                    obj_prob[i, j],
+                ) for j in range(pred_corners_3d_upright_camera.shape[1])
+                if pred_mask[i, j] == 1
+                and obj_prob[i, j] > config_dict['conf_thresh']
+            ])
+
+    return batch_pred_map_cls
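Typical evaluation-time use, sketched with random tensors; `_DC` is a hypothetical stand-in for the dataset config object, which only needs to expose `num_semcls` here (`get_ap_config_dict`, defined just below, fills in the remaining NMS settings):

```python
import torch
from utils.ap_calculator import get_ap_config_dict, parse_predictions

class _DC:  # minimal stand-in for the dataset config (assumption)
    num_semcls = 10

cfg = get_ap_config_dict(dataset_config=_DC())
boxes = torch.rand(2, 256, 8, 3).cuda()    # (B, K, 8, 3) predicted corners
sem_probs = torch.rand(2, 256, 10).cuda()  # (B, K, num_semcls)
obj_probs = torch.rand(2, 256).cuda()      # (B, K) objectness in [0, 1]
pc = torch.rand(2, 40000, 4).cuda()        # (B, N, 3+) input points
preds = parse_predictions(boxes, sem_probs, obj_probs, pc, cfg)
```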
+def get_ap_config_dict( + remove_empty_box=True, + use_3d_nms=True, + nms_iou=0.25, + use_old_type_nms=False, + cls_nms=True, + per_class_proposal=True, + use_cls_confidence_only=False, + conf_thresh=0.05, + no_nms=False, + dataset_config=None, +): + """Default mAP evaluation settings for VoteNet.""" + + config_dict = { + 'remove_empty_box': remove_empty_box, + 'use_3d_nms': use_3d_nms, + 'nms_iou': nms_iou, + 'use_old_type_nms': use_old_type_nms, + 'cls_nms': cls_nms, + 'per_class_proposal': per_class_proposal, + 'use_cls_confidence_only': use_cls_confidence_only, + 'conf_thresh': conf_thresh, + 'no_nms': no_nms, + 'dataset_config': dataset_config, + } + return config_dict + +
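# [Editor's sketch, not part of the diff] `get_ap_config_dict` simply bundles
# keyword overrides into the dict consumed by `parse_predictions`; for
# example, a stricter, class-agnostic NMS setup (hypothetical values, with
# the `_DummyDatasetConfig` stand-in from the sketch above):
strict_cfg = get_ap_config_dict(dataset_config=_DummyDatasetConfig(),
                                nms_iou=0.5, cls_nms=False)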
+class APCalculator(object): + """Calculating Average Precision.""" + + def __init__( + self, + dataset_config, + ap_iou_thresh=[0.25, 0.5], + class2type_map=None, + exact_eval=True, + ap_config_dict=None, + ): + """ + Args: + ap_iou_thresh: list of floats between 0 and 1.0; + IoU thresholds used to judge whether a prediction is positive. + class2type_map: [optional] dict {class_int: class_name} + """ + self.ap_iou_thresh = ap_iou_thresh + if ap_config_dict is None: + ap_config_dict = get_ap_config_dict(dataset_config=dataset_config, + remove_empty_box=exact_eval) + self.ap_config_dict = ap_config_dict + self.class2type_map = class2type_map + self.reset() + + def make_gt_list(self, gt_box_corners, gt_box_sem_cls_labels, + gt_box_present): + batch_gt_map_cls = [] + bsize = gt_box_corners.shape[0] + for i in range(bsize): + batch_gt_map_cls.append([(gt_box_sem_cls_labels[i, j].item(), + gt_box_corners[i, j]) + for j in range(gt_box_corners.shape[1]) + if gt_box_present[i, j] == 1]) + return batch_gt_map_cls + + def step_meter(self, outputs, targets): + if 'outputs' in outputs: + outputs = outputs['outputs'] + self.step( + predicted_box_corners=outputs['box_corners'], + sem_cls_probs=outputs['sem_cls_prob'], + objectness_probs=outputs['objectness_prob'], + point_cloud=targets['point_clouds'], + gt_box_corners=targets['gt_box_corners'], + gt_box_sem_cls_labels=targets['gt_box_sem_cls_label'], + gt_box_present=targets['gt_box_present'], + ) + + def step( + self, + predicted_box_corners, + sem_cls_probs, + objectness_probs, + point_cloud, + gt_box_corners, + gt_box_sem_cls_labels, + gt_box_present, + ): + """Perform NMS on predicted boxes, threshold them according to score, + and convert the GT boxes to the same evaluation format.""" + gt_box_corners = gt_box_corners.cpu().detach().numpy() + gt_box_sem_cls_labels = gt_box_sem_cls_labels.cpu().detach().numpy() + gt_box_present = gt_box_present.cpu().detach().numpy() + batch_gt_map_cls = self.make_gt_list(gt_box_corners, + gt_box_sem_cls_labels, + gt_box_present) + + batch_pred_map_cls = parse_predictions( + predicted_box_corners, + sem_cls_probs, + objectness_probs, + point_cloud, + self.ap_config_dict, + ) + + self.accumulate(batch_pred_map_cls, batch_gt_map_cls) +
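# [Editor's sketch, not part of the diff] The intended evaluation loop:
# `eval_batches` is an assumed iterable of (outputs, targets) dicts in the
# format documented in `step_meter` above; `_DummyDatasetConfig` is the
# hypothetical stand-in introduced earlier.
calc = APCalculator(dataset_config=_DummyDatasetConfig(),
                    ap_iou_thresh=[0.25, 0.5])
for outputs, targets in eval_batches:
    calc.step_meter(outputs, targets)
print(calc)  # formatted per-threshold mAP / AR summary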
+ def accumulate(self, batch_pred_map_cls, batch_gt_map_cls): + """Accumulate one batch of predictions and ground truths. + + Args: + batch_pred_map_cls: a list of lists [[(pred_cls, pred_box_params, score),...],...] + batch_gt_map_cls: a list of lists [[(gt_cls, gt_box_params),...],...]; + should have the same length as batch_pred_map_cls (batch_size) + """ + bsize = len(batch_pred_map_cls) + assert bsize == len(batch_gt_map_cls) + for i in range(bsize): + self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i] + self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i] + self.scan_cnt += 1 + + def compute_metrics(self): + """Use accumulated predictions and ground truths to compute Average + Precision.""" + overall_ret = OrderedDict() + for ap_iou_thresh in self.ap_iou_thresh: + ret_dict = OrderedDict() + rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, + self.gt_map_cls, + ovthresh=ap_iou_thresh) + for key in sorted(ap.keys()): + clsname = self.class2type_map[ + key] if self.class2type_map else str(key) + ret_dict['%s Average Precision' % (clsname)] = ap[key] + ap_vals = np.array(list(ap.values()), dtype=np.float32) + ap_vals[np.isnan(ap_vals)] = 0 + ret_dict['mAP'] = ap_vals.mean() + rec_list = [] + for key in sorted(ap.keys()): + clsname = self.class2type_map[ + key] if self.class2type_map else str(key) + try: + ret_dict['%s Recall' % (clsname)] = rec[key][-1] + rec_list.append(rec[key][-1]) + except (KeyError, IndexError): + ret_dict['%s Recall' % (clsname)] = 0 + rec_list.append(0) + ret_dict['AR'] = np.mean(rec_list) + overall_ret[ap_iou_thresh] = ret_dict + return overall_ret + + def __str__(self): + overall_ret = self.compute_metrics() + return self.metrics_to_str(overall_ret) + + def metrics_to_str(self, overall_ret, per_class=True): + mAP_strs = [] + AR_strs = [] + per_class_metrics = [] + for ap_iou_thresh in self.ap_iou_thresh: + mAP = overall_ret[ap_iou_thresh]['mAP'] * 100 + mAP_strs.append(f'{mAP:.2f}') + ar = overall_ret[ap_iou_thresh]['AR'] * 100 + AR_strs.append(f'{ar:.2f}') + + if per_class: + # per-class metrics + per_class_metrics.append('-' * 5) + per_class_metrics.append(f'IOU Thresh={ap_iou_thresh}') + for x in list(overall_ret[ap_iou_thresh].keys()): + if x == 'mAP' or x == 'AR': + pass + else: + met_str = f'{x}: {overall_ret[ap_iou_thresh][x]*100:.2f}' + per_class_metrics.append(met_str) + + ap_header = [f'mAP{x:.2f}' for x in self.ap_iou_thresh] + ap_str = ', '.join(ap_header) + ap_str += ': ' + ', '.join(mAP_strs) + ap_str += '\n' + + ar_header = [f'AR{x:.2f}' for x in self.ap_iou_thresh] + ap_str += ', '.join(ar_header) + ap_str += ': ' + ', '.join(AR_strs) + + if per_class: + per_class_metrics = '\n'.join(per_class_metrics) + ap_str += '\n' + ap_str += per_class_metrics + + return ap_str + + def metrics_to_dict(self, overall_ret): + metrics_dict = {} + for ap_iou_thresh in self.ap_iou_thresh: + metrics_dict[f'mAP_{ap_iou_thresh}'] = ( + overall_ret[ap_iou_thresh]['mAP'] * 100) + metrics_dict[ + f'AR_{ap_iou_thresh}'] = overall_ret[ap_iou_thresh]['AR'] * 100 + return metrics_dict + + def reset(self): + self.gt_map_cls = {} # {scan_id: [(classname, bbox)]} + self.pred_map_cls = {} # {scan_id: [(classname, bbox, score)]} + self.scan_cnt = 0 diff --git a/models/LL3DA/utils/box_intersection.c b/models/LL3DA/utils/box_intersection.c new file mode 100644 index 0000000..349fd2a --- /dev/null +++ b/models/LL3DA/utils/box_intersection.c @@ -0,0 +1,32752 @@ +/* Generated by Cython 3.0.2 */ + +/* BEGIN: Cython Metadata +{ + "distutils": { + "depends": [ + "/opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/core/include/numpy/arrayobject.h", + "/opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/core/include/numpy/arrayscalars.h", + "/opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/core/include/numpy/ndarrayobject.h", +
"/opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/core/include/numpy/ndarraytypes.h", + "/opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/core/include/numpy/ufuncobject.h" + ], + "include_dirs": [ + "/opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/core/include/" + ], + "name": "box_intersection", + "sources": [ + "box_intersection.pyx" + ] + }, + "module_name": "box_intersection" +} +END: Cython Metadata */ + +#ifndef PY_SSIZE_T_CLEAN +#define PY_SSIZE_T_CLEAN +#endif /* PY_SSIZE_T_CLEAN */ +#if defined(CYTHON_LIMITED_API) && 0 + #ifndef Py_LIMITED_API + #if CYTHON_LIMITED_API+0 > 0x03030000 + #define Py_LIMITED_API CYTHON_LIMITED_API + #else + #define Py_LIMITED_API 0x03030000 + #endif + #endif +#endif + +#include "Python.h" +#ifndef Py_PYTHON_H + #error Python headers needed to compile C extensions, please install development version of Python. +#elif PY_VERSION_HEX < 0x02070000 || (0x03000000 <= PY_VERSION_HEX && PY_VERSION_HEX < 0x03030000) + #error Cython requires Python 2.7+ or Python 3.3+. +#else +#if CYTHON_LIMITED_API +#define __PYX_EXTRA_ABI_MODULE_NAME "limited" +#else +#define __PYX_EXTRA_ABI_MODULE_NAME "" +#endif +#define CYTHON_ABI "3_0_2" __PYX_EXTRA_ABI_MODULE_NAME +#define __PYX_ABI_MODULE_NAME "_cython_" CYTHON_ABI +#define __PYX_TYPE_MODULE_PREFIX __PYX_ABI_MODULE_NAME "." +#define CYTHON_HEX_VERSION 0x030002F0 +#define CYTHON_FUTURE_DIVISION 1 +#include +#ifndef offsetof + #define offsetof(type, member) ( (size_t) & ((type*)0) -> member ) +#endif +#if !defined(_WIN32) && !defined(WIN32) && !defined(MS_WINDOWS) + #ifndef __stdcall + #define __stdcall + #endif + #ifndef __cdecl + #define __cdecl + #endif + #ifndef __fastcall + #define __fastcall + #endif +#endif +#ifndef DL_IMPORT + #define DL_IMPORT(t) t +#endif +#ifndef DL_EXPORT + #define DL_EXPORT(t) t +#endif +#define __PYX_COMMA , +#ifndef HAVE_LONG_LONG + #define HAVE_LONG_LONG +#endif +#ifndef PY_LONG_LONG + #define PY_LONG_LONG LONG_LONG +#endif +#ifndef Py_HUGE_VAL + #define Py_HUGE_VAL HUGE_VAL +#endif +#define __PYX_LIMITED_VERSION_HEX PY_VERSION_HEX +#if defined(GRAALVM_PYTHON) + /* For very preliminary testing purposes. Most variables are set the same as PyPy. 
+ The existence of this section does not imply that anything works or is even tested */ + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 1 + #define CYTHON_COMPILING_IN_NOGIL 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) + #endif + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif +#elif defined(PYPY_VERSION) + #define CYTHON_COMPILING_IN_PYPY 1 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #ifndef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #undef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 1 + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS (PY_MAJOR_VERSION >= 3) + #endif + #if PY_VERSION_HEX < 0x03090000 + #undef 
CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE (PY_VERSION_HEX >= 0x030400a1 && PYPY_VERSION_NUM >= 0x07030C00) + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif +#elif defined(CYTHON_LIMITED_API) + #ifdef Py_LIMITED_API + #undef __PYX_LIMITED_VERSION_HEX + #define __PYX_LIMITED_VERSION_HEX Py_LIMITED_API + #endif + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 1 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + #undef CYTHON_CLINE_IN_TRACEBACK + #define CYTHON_CLINE_IN_TRACEBACK 0 + #undef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 0 + #undef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 1 + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #undef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #endif + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #undef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 0 + #undef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 0 + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL 0 + #undef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS 1 + #endif + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #undef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 1 + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #endif + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 0 + #endif +#elif defined(PY_NOGIL) + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 0 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 1 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #undef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 0 + #ifndef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #undef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 0 + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #undef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 0 + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + 
#endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #undef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 0 + #undef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 0 + #ifndef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #ifndef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 1 + #endif + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 +#else + #define CYTHON_COMPILING_IN_PYPY 0 + #define CYTHON_COMPILING_IN_CPYTHON 1 + #define CYTHON_COMPILING_IN_LIMITED_API 0 + #define CYTHON_COMPILING_IN_GRAAL 0 + #define CYTHON_COMPILING_IN_NOGIL 0 + #ifndef CYTHON_USE_TYPE_SLOTS + #define CYTHON_USE_TYPE_SLOTS 1 + #endif + #ifndef CYTHON_USE_TYPE_SPECS + #define CYTHON_USE_TYPE_SPECS 0 + #endif + #ifndef CYTHON_USE_PYTYPE_LOOKUP + #define CYTHON_USE_PYTYPE_LOOKUP 1 + #endif + #if PY_MAJOR_VERSION < 3 + #undef CYTHON_USE_ASYNC_SLOTS + #define CYTHON_USE_ASYNC_SLOTS 0 + #elif !defined(CYTHON_USE_ASYNC_SLOTS) + #define CYTHON_USE_ASYNC_SLOTS 1 + #endif + #ifndef CYTHON_USE_PYLONG_INTERNALS + #define CYTHON_USE_PYLONG_INTERNALS 1 + #endif + #ifndef CYTHON_USE_PYLIST_INTERNALS + #define CYTHON_USE_PYLIST_INTERNALS 1 + #endif + #ifndef CYTHON_USE_UNICODE_INTERNALS + #define CYTHON_USE_UNICODE_INTERNALS 1 + #endif + #if PY_VERSION_HEX < 0x030300F0 || PY_VERSION_HEX >= 0x030B00A2 + #undef CYTHON_USE_UNICODE_WRITER + #define CYTHON_USE_UNICODE_WRITER 0 + #elif !defined(CYTHON_USE_UNICODE_WRITER) + #define CYTHON_USE_UNICODE_WRITER 1 + #endif + #ifndef CYTHON_AVOID_BORROWED_REFS + #define CYTHON_AVOID_BORROWED_REFS 0 + #endif + #ifndef CYTHON_ASSUME_SAFE_MACROS + #define CYTHON_ASSUME_SAFE_MACROS 1 + #endif + #ifndef CYTHON_UNPACK_METHODS + #define CYTHON_UNPACK_METHODS 1 + #endif + #ifndef CYTHON_FAST_THREAD_STATE + #define CYTHON_FAST_THREAD_STATE 1 + #endif + #ifndef CYTHON_FAST_GIL + #define CYTHON_FAST_GIL (PY_MAJOR_VERSION < 3 || PY_VERSION_HEX >= 0x03060000 && PY_VERSION_HEX < 0x030C00A6) + #endif + #ifndef CYTHON_METH_FASTCALL + #define CYTHON_METH_FASTCALL (PY_VERSION_HEX >= 0x030700A1) + #endif + #ifndef CYTHON_FAST_PYCALL + #define CYTHON_FAST_PYCALL 1 + #endif + #ifndef CYTHON_PEP487_INIT_SUBCLASS + #define CYTHON_PEP487_INIT_SUBCLASS 1 + #endif + #if PY_VERSION_HEX < 0x03050000 + #undef CYTHON_PEP489_MULTI_PHASE_INIT + #define CYTHON_PEP489_MULTI_PHASE_INIT 0 + #elif !defined(CYTHON_PEP489_MULTI_PHASE_INIT) + #define CYTHON_PEP489_MULTI_PHASE_INIT 1 + #endif + #ifndef CYTHON_USE_MODULE_STATE + #define CYTHON_USE_MODULE_STATE 0 + #endif + #if PY_VERSION_HEX < 0x030400a1 + #undef CYTHON_USE_TP_FINALIZE + #define CYTHON_USE_TP_FINALIZE 0 + #elif !defined(CYTHON_USE_TP_FINALIZE) + #define CYTHON_USE_TP_FINALIZE 1 + #endif + #if PY_VERSION_HEX < 0x030600B1 + #undef CYTHON_USE_DICT_VERSIONS + #define CYTHON_USE_DICT_VERSIONS 0 + #elif !defined(CYTHON_USE_DICT_VERSIONS) + #define CYTHON_USE_DICT_VERSIONS (PY_VERSION_HEX < 0x030C00A5) + #endif + #if PY_VERSION_HEX < 0x030700A3 + #undef CYTHON_USE_EXC_INFO_STACK + #define CYTHON_USE_EXC_INFO_STACK 0 + #elif !defined(CYTHON_USE_EXC_INFO_STACK) + #define CYTHON_USE_EXC_INFO_STACK 1 + #endif + #ifndef CYTHON_UPDATE_DESCRIPTOR_DOC + #define CYTHON_UPDATE_DESCRIPTOR_DOC 1 + #endif +#endif +#if !defined(CYTHON_FAST_PYCCALL) +#define CYTHON_FAST_PYCCALL (CYTHON_FAST_PYCALL && PY_VERSION_HEX >= 0x030600B1) +#endif +#if !defined(CYTHON_VECTORCALL) +#define CYTHON_VECTORCALL 
(CYTHON_FAST_PYCCALL && PY_VERSION_HEX >= 0x030800B1) +#endif +#define CYTHON_BACKPORT_VECTORCALL (CYTHON_METH_FASTCALL && PY_VERSION_HEX < 0x030800B1) +#if CYTHON_USE_PYLONG_INTERNALS + #if PY_MAJOR_VERSION < 3 + #include "longintrepr.h" + #endif + #undef SHIFT + #undef BASE + #undef MASK + #ifdef SIZEOF_VOID_P + enum { __pyx_check_sizeof_voidp = 1 / (int)(SIZEOF_VOID_P == sizeof(void*)) }; + #endif +#endif +#ifndef __has_attribute + #define __has_attribute(x) 0 +#endif +#ifndef __has_cpp_attribute + #define __has_cpp_attribute(x) 0 +#endif +#ifndef CYTHON_RESTRICT + #if defined(__GNUC__) + #define CYTHON_RESTRICT __restrict__ + #elif defined(_MSC_VER) && _MSC_VER >= 1400 + #define CYTHON_RESTRICT __restrict + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_RESTRICT restrict + #else + #define CYTHON_RESTRICT + #endif +#endif +#ifndef CYTHON_UNUSED + #if defined(__cplusplus) + /* for clang __has_cpp_attribute(maybe_unused) is true even before C++17 + * but leads to warnings with -pedantic, since it is a C++17 feature */ + #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L) + #if __has_cpp_attribute(maybe_unused) + #define CYTHON_UNUSED [[maybe_unused]] + #endif + #endif + #endif +#endif +#ifndef CYTHON_UNUSED +# if defined(__GNUC__) +# if !(defined(__cplusplus)) || (__GNUC__ > 3 || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +# elif defined(__ICC) || (defined(__INTEL_COMPILER) && !defined(_MSC_VER)) +# define CYTHON_UNUSED __attribute__ ((__unused__)) +# else +# define CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_UNUSED_VAR +# if defined(__cplusplus) + template void CYTHON_UNUSED_VAR( const T& ) { } +# else +# define CYTHON_UNUSED_VAR(x) (void)(x) +# endif +#endif +#ifndef CYTHON_MAYBE_UNUSED_VAR + #define CYTHON_MAYBE_UNUSED_VAR(x) CYTHON_UNUSED_VAR(x) +#endif +#ifndef CYTHON_NCP_UNUSED +# if CYTHON_COMPILING_IN_CPYTHON +# define CYTHON_NCP_UNUSED +# else +# define CYTHON_NCP_UNUSED CYTHON_UNUSED +# endif +#endif +#ifndef CYTHON_USE_CPP_STD_MOVE + #if defined(__cplusplus) && (\ + __cplusplus >= 201103L || (defined(_MSC_VER) && _MSC_VER >= 1600)) + #define CYTHON_USE_CPP_STD_MOVE 1 + #else + #define CYTHON_USE_CPP_STD_MOVE 0 + #endif +#endif +#define __Pyx_void_to_None(void_result) ((void)(void_result), Py_INCREF(Py_None), Py_None) +#ifdef _MSC_VER + #ifndef _MSC_STDINT_H_ + #if _MSC_VER < 1300 + typedef unsigned char uint8_t; + typedef unsigned short uint16_t; + typedef unsigned int uint32_t; + #else + typedef unsigned __int8 uint8_t; + typedef unsigned __int16 uint16_t; + typedef unsigned __int32 uint32_t; + #endif + #endif + #if _MSC_VER < 1300 + #ifdef _WIN64 + typedef unsigned long long __pyx_uintptr_t; + #else + typedef unsigned int __pyx_uintptr_t; + #endif + #else + #ifdef _WIN64 + typedef unsigned __int64 __pyx_uintptr_t; + #else + typedef unsigned __int32 __pyx_uintptr_t; + #endif + #endif +#else + #include + typedef uintptr_t __pyx_uintptr_t; +#endif +#ifndef CYTHON_FALLTHROUGH + #if defined(__cplusplus) + /* for clang __has_cpp_attribute(fallthrough) is true even before C++17 + * but leads to warnings with -pedantic, since it is a C++17 feature */ + #if ((defined(_MSVC_LANG) && _MSVC_LANG >= 201703L) || __cplusplus >= 201703L) + #if __has_cpp_attribute(fallthrough) + #define CYTHON_FALLTHROUGH [[fallthrough]] + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_cpp_attribute(clang::fallthrough) + #define CYTHON_FALLTHROUGH 
[[clang::fallthrough]] + #elif __has_cpp_attribute(gnu::fallthrough) + #define CYTHON_FALLTHROUGH [[gnu::fallthrough]] + #endif + #endif + #endif + #ifndef CYTHON_FALLTHROUGH + #if __has_attribute(fallthrough) + #define CYTHON_FALLTHROUGH __attribute__((fallthrough)) + #else + #define CYTHON_FALLTHROUGH + #endif + #endif + #if defined(__clang__) && defined(__apple_build_version__) + #if __apple_build_version__ < 7000000 + #undef CYTHON_FALLTHROUGH + #define CYTHON_FALLTHROUGH + #endif + #endif +#endif +#ifdef __cplusplus + template + struct __PYX_IS_UNSIGNED_IMPL {static const bool value = T(0) < T(-1);}; + #define __PYX_IS_UNSIGNED(type) (__PYX_IS_UNSIGNED_IMPL::value) +#else + #define __PYX_IS_UNSIGNED(type) (((type)-1) > 0) +#endif +#if CYTHON_COMPILING_IN_PYPY == 1 + #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x030A0000) +#else + #define __PYX_NEED_TP_PRINT_SLOT (PY_VERSION_HEX >= 0x030800b4 && PY_VERSION_HEX < 0x03090000) +#endif +#define __PYX_REINTERPRET_FUNCION(func_pointer, other_pointer) ((func_pointer)(void(*)(void))(other_pointer)) + +#ifndef CYTHON_INLINE + #if defined(__clang__) + #define CYTHON_INLINE __inline__ __attribute__ ((__unused__)) + #elif defined(__GNUC__) + #define CYTHON_INLINE __inline__ + #elif defined(_MSC_VER) + #define CYTHON_INLINE __inline + #elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define CYTHON_INLINE inline + #else + #define CYTHON_INLINE + #endif +#endif + +#define __PYX_BUILD_PY_SSIZE_T "n" +#define CYTHON_FORMAT_SSIZE_T "z" +#if PY_MAJOR_VERSION < 3 + #define __Pyx_BUILTIN_MODULE_NAME "__builtin__" + #define __Pyx_DefaultClassType PyClass_Type + #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a+k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_BUILTIN_MODULE_NAME "builtins" + #define __Pyx_DefaultClassType PyType_Type +#if CYTHON_COMPILING_IN_LIMITED_API + static CYTHON_INLINE PyObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, + PyObject *code, PyObject *c, PyObject* n, PyObject *v, + PyObject *fv, PyObject *cell, PyObject* fn, + PyObject *name, int fline, PyObject *lnos) { + PyObject *exception_table = NULL; + PyObject *types_module=NULL, *code_type=NULL, *result=NULL; + PyObject *version_info; // borrowed + PyObject *py_minor_version = NULL; + long minor_version = 0; + PyObject *type, *value, *traceback; + PyErr_Fetch(&type, &value, &traceback); + #if __PYX_LIMITED_VERSION_HEX >= 0x030B0000 + minor_version = 11; // we don't yet need to distinguish between versions > 11 + #else + if (!(version_info = PySys_GetObject("version_info"))) goto end; + if (!(py_minor_version = PySequence_GetItem(version_info, 1))) goto end; + minor_version = PyLong_AsLong(py_minor_version); + if (minor_version == -1 && PyErr_Occurred()) goto end; + #endif + if (!(types_module = PyImport_ImportModule("types"))) goto end; + if (!(code_type = PyObject_GetAttrString(types_module, "CodeType"))) goto end; + if (minor_version <= 7) { + (void)p; + result = PyObject_CallFunction(code_type, "iiiiiOOOOOOiOO", a, k, l, s, f, code, + c, n, v, fn, name, fline, lnos, fv, cell); + } else if (minor_version <= 10) { + result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOiOO", a,p, k, l, s, f, code, + c, n, v, fn, name, fline, lnos, fv, cell); + } else { + if (!(exception_table = PyBytes_FromStringAndSize(NULL, 0))) goto end; + result = PyObject_CallFunction(code_type, "iiiiiiOOOOOOOiOO", a,p, k, l, s, f, code, + c, n, 
v, fn, name, name, fline, lnos, exception_table, fv, cell); + } + end: + Py_XDECREF(code_type); + Py_XDECREF(exception_table); + Py_XDECREF(types_module); + Py_XDECREF(py_minor_version); + if (type) { + PyErr_Restore(type, value, traceback); + } + return result; + } + #ifndef CO_OPTIMIZED + #define CO_OPTIMIZED 0x0001 + #endif + #ifndef CO_NEWLOCALS + #define CO_NEWLOCALS 0x0002 + #endif + #ifndef CO_VARARGS + #define CO_VARARGS 0x0004 + #endif + #ifndef CO_VARKEYWORDS + #define CO_VARKEYWORDS 0x0008 + #endif + #ifndef CO_ASYNC_GENERATOR + #define CO_ASYNC_GENERATOR 0x0200 + #endif + #ifndef CO_GENERATOR + #define CO_GENERATOR 0x0020 + #endif + #ifndef CO_COROUTINE + #define CO_COROUTINE 0x0080 + #endif +#elif PY_VERSION_HEX >= 0x030B0000 + static CYTHON_INLINE PyCodeObject* __Pyx_PyCode_New(int a, int p, int k, int l, int s, int f, + PyObject *code, PyObject *c, PyObject* n, PyObject *v, + PyObject *fv, PyObject *cell, PyObject* fn, + PyObject *name, int fline, PyObject *lnos) { + PyCodeObject *result; + PyObject *empty_bytes = PyBytes_FromStringAndSize("", 0); // we don't have access to __pyx_empty_bytes here + if (!empty_bytes) return NULL; + result = + #if PY_VERSION_HEX >= 0x030C0000 + PyUnstable_Code_NewWithPosOnlyArgs + #else + PyCode_NewWithPosOnlyArgs + #endif + (a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, name, fline, lnos, empty_bytes); + Py_DECREF(empty_bytes); + return result; + } +#elif PY_VERSION_HEX >= 0x030800B2 && !CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_NewWithPosOnlyArgs(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#else + #define __Pyx_PyCode_New(a, p, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos)\ + PyCode_New(a, k, l, s, f, code, c, n, v, fv, cell, fn, name, fline, lnos) +#endif +#endif +#if PY_VERSION_HEX >= 0x030900A4 || defined(Py_IS_TYPE) + #define __Pyx_IS_TYPE(ob, type) Py_IS_TYPE(ob, type) +#else + #define __Pyx_IS_TYPE(ob, type) (((const PyObject*)ob)->ob_type == (type)) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_Is) + #define __Pyx_Py_Is(x, y) Py_Is(x, y) +#else + #define __Pyx_Py_Is(x, y) ((x) == (y)) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsNone) + #define __Pyx_Py_IsNone(ob) Py_IsNone(ob) +#else + #define __Pyx_Py_IsNone(ob) __Pyx_Py_Is((ob), Py_None) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsTrue) + #define __Pyx_Py_IsTrue(ob) Py_IsTrue(ob) +#else + #define __Pyx_Py_IsTrue(ob) __Pyx_Py_Is((ob), Py_True) +#endif +#if PY_VERSION_HEX >= 0x030A00B1 || defined(Py_IsFalse) + #define __Pyx_Py_IsFalse(ob) Py_IsFalse(ob) +#else + #define __Pyx_Py_IsFalse(ob) __Pyx_Py_Is((ob), Py_False) +#endif +#define __Pyx_NoneAsNull(obj) (__Pyx_Py_IsNone(obj) ? 
NULL : (obj)) +#if PY_VERSION_HEX >= 0x030900F0 && !CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyObject_GC_IsFinalized(o) PyObject_GC_IsFinalized(o) +#else + #define __Pyx_PyObject_GC_IsFinalized(o) _PyGC_FINALIZED(o) +#endif +#ifndef CO_COROUTINE + #define CO_COROUTINE 0x80 +#endif +#ifndef CO_ASYNC_GENERATOR + #define CO_ASYNC_GENERATOR 0x200 +#endif +#ifndef Py_TPFLAGS_CHECKTYPES + #define Py_TPFLAGS_CHECKTYPES 0 +#endif +#ifndef Py_TPFLAGS_HAVE_INDEX + #define Py_TPFLAGS_HAVE_INDEX 0 +#endif +#ifndef Py_TPFLAGS_HAVE_NEWBUFFER + #define Py_TPFLAGS_HAVE_NEWBUFFER 0 +#endif +#ifndef Py_TPFLAGS_HAVE_FINALIZE + #define Py_TPFLAGS_HAVE_FINALIZE 0 +#endif +#ifndef Py_TPFLAGS_SEQUENCE + #define Py_TPFLAGS_SEQUENCE 0 +#endif +#ifndef Py_TPFLAGS_MAPPING + #define Py_TPFLAGS_MAPPING 0 +#endif +#ifndef METH_STACKLESS + #define METH_STACKLESS 0 +#endif +#if PY_VERSION_HEX <= 0x030700A3 || !defined(METH_FASTCALL) + #ifndef METH_FASTCALL + #define METH_FASTCALL 0x80 + #endif + typedef PyObject *(*__Pyx_PyCFunctionFast) (PyObject *self, PyObject *const *args, Py_ssize_t nargs); + typedef PyObject *(*__Pyx_PyCFunctionFastWithKeywords) (PyObject *self, PyObject *const *args, + Py_ssize_t nargs, PyObject *kwnames); +#else + #define __Pyx_PyCFunctionFast _PyCFunctionFast + #define __Pyx_PyCFunctionFastWithKeywords _PyCFunctionFastWithKeywords +#endif +#if CYTHON_METH_FASTCALL + #define __Pyx_METH_FASTCALL METH_FASTCALL + #define __Pyx_PyCFunction_FastCall __Pyx_PyCFunctionFast + #define __Pyx_PyCFunction_FastCallWithKeywords __Pyx_PyCFunctionFastWithKeywords +#else + #define __Pyx_METH_FASTCALL METH_VARARGS + #define __Pyx_PyCFunction_FastCall PyCFunction + #define __Pyx_PyCFunction_FastCallWithKeywords PyCFunctionWithKeywords +#endif +#if CYTHON_VECTORCALL + #define __pyx_vectorcallfunc vectorcallfunc + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET PY_VECTORCALL_ARGUMENTS_OFFSET + #define __Pyx_PyVectorcall_NARGS(n) PyVectorcall_NARGS((size_t)(n)) +#elif CYTHON_BACKPORT_VECTORCALL + typedef PyObject *(*__pyx_vectorcallfunc)(PyObject *callable, PyObject *const *args, + size_t nargsf, PyObject *kwnames); + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET ((size_t)1 << (8 * sizeof(size_t) - 1)) + #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(((size_t)(n)) & ~__Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET)) +#else + #define __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET 0 + #define __Pyx_PyVectorcall_NARGS(n) ((Py_ssize_t)(n)) +#endif +#if __PYX_LIMITED_VERSION_HEX < 0x030900B1 + #define __Pyx_PyType_FromModuleAndSpec(m, s, b) ((void)m, PyType_FromSpecWithBases(s, b)) + typedef PyObject *(*__Pyx_PyCMethod)(PyObject *, PyTypeObject *, PyObject *const *, size_t, PyObject *); +#else + #define __Pyx_PyType_FromModuleAndSpec(m, s, b) PyType_FromModuleAndSpec(m, s, b) + #define __Pyx_PyCMethod PyCMethod +#endif +#ifndef METH_METHOD + #define METH_METHOD 0x200 +#endif +#if CYTHON_COMPILING_IN_PYPY && !defined(PyObject_Malloc) + #define PyObject_Malloc(s) PyMem_Malloc(s) + #define PyObject_Free(p) PyMem_Free(p) + #define PyObject_Realloc(p) PyMem_Realloc(p) +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) +#else + #define __Pyx_PyCode_HasFreeVars(co) (PyCode_GetNumFree(co) > 0) + #define __Pyx_PyFrame_SetLineNumber(frame, lineno) (frame)->f_lineno = (lineno) +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_PyThreadState_Current PyThreadState_Get() +#elif !CYTHON_FAST_THREAD_STATE + #define 
__Pyx_PyThreadState_Current PyThreadState_GET() +#elif PY_VERSION_HEX >= 0x03060000 + #define __Pyx_PyThreadState_Current _PyThreadState_UncheckedGet() +#elif PY_VERSION_HEX >= 0x03000000 + #define __Pyx_PyThreadState_Current PyThreadState_GET() +#else + #define __Pyx_PyThreadState_Current _PyThreadState_Current +#endif +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_INLINE void *__Pyx_PyModule_GetState(PyObject *op) +{ + void *result; + result = PyModule_GetState(op); + if (!result) + Py_FatalError("Couldn't find the module state"); + return result; +} +#endif +#define __Pyx_PyObject_GetSlot(obj, name, func_ctype) __Pyx_PyType_GetSlot(Py_TYPE(obj), name, func_ctype) +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((func_ctype) PyType_GetSlot((type), Py_##name)) +#else + #define __Pyx_PyType_GetSlot(type, name, func_ctype) ((type)->name) +#endif +#if PY_VERSION_HEX < 0x030700A2 && !defined(PyThread_tss_create) && !defined(Py_tss_NEEDS_INIT) +#include "pythread.h" +#define Py_tss_NEEDS_INIT 0 +typedef int Py_tss_t; +static CYTHON_INLINE int PyThread_tss_create(Py_tss_t *key) { + *key = PyThread_create_key(); + return 0; +} +static CYTHON_INLINE Py_tss_t * PyThread_tss_alloc(void) { + Py_tss_t *key = (Py_tss_t *)PyObject_Malloc(sizeof(Py_tss_t)); + *key = Py_tss_NEEDS_INIT; + return key; +} +static CYTHON_INLINE void PyThread_tss_free(Py_tss_t *key) { + PyObject_Free(key); +} +static CYTHON_INLINE int PyThread_tss_is_created(Py_tss_t *key) { + return *key != Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE void PyThread_tss_delete(Py_tss_t *key) { + PyThread_delete_key(*key); + *key = Py_tss_NEEDS_INIT; +} +static CYTHON_INLINE int PyThread_tss_set(Py_tss_t *key, void *value) { + return PyThread_set_key_value(*key, value); +} +static CYTHON_INLINE void * PyThread_tss_get(Py_tss_t *key) { + return PyThread_get_key_value(*key); +} +#endif +#if PY_MAJOR_VERSION < 3 + #if CYTHON_COMPILING_IN_PYPY + #if PYPY_VERSION_NUM < 0x07030600 + #if defined(__cplusplus) && __cplusplus >= 201402L + [[deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")]] + #elif defined(__GNUC__) || defined(__clang__) + __attribute__ ((__deprecated__("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6"))) + #elif defined(_MSC_VER) + __declspec(deprecated("`with nogil:` inside a nogil function will not release the GIL in PyPy2 < 7.3.6")) + #endif + static CYTHON_INLINE int PyGILState_Check(void) { + return 0; + } + #else // PYPY_VERSION_NUM < 0x07030600 + #endif // PYPY_VERSION_NUM < 0x07030600 + #else + static CYTHON_INLINE int PyGILState_Check(void) { + PyThreadState * tstate = _PyThreadState_Current; + return tstate && (tstate == PyGILState_GetThisThreadState()); + } + #endif +#endif +#if CYTHON_COMPILING_IN_CPYTHON || defined(_PyDict_NewPresized) +#define __Pyx_PyDict_NewPresized(n) ((n <= 8) ? 
PyDict_New() : _PyDict_NewPresized(n)) +#else +#define __Pyx_PyDict_NewPresized(n) PyDict_New() +#endif +#if PY_MAJOR_VERSION >= 3 || CYTHON_FUTURE_DIVISION + #define __Pyx_PyNumber_Divide(x,y) PyNumber_TrueDivide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceTrueDivide(x,y) +#else + #define __Pyx_PyNumber_Divide(x,y) PyNumber_Divide(x,y) + #define __Pyx_PyNumber_InPlaceDivide(x,y) PyNumber_InPlaceDivide(x,y) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX > 0x030600B4 && CYTHON_USE_UNICODE_INTERNALS +#define __Pyx_PyDict_GetItemStrWithError(dict, name) _PyDict_GetItem_KnownHash(dict, name, ((PyASCIIObject *) name)->hash) +static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStr(PyObject *dict, PyObject *name) { + PyObject *res = __Pyx_PyDict_GetItemStrWithError(dict, name); + if (res == NULL) PyErr_Clear(); + return res; +} +#elif PY_MAJOR_VERSION >= 3 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07020000) +#define __Pyx_PyDict_GetItemStrWithError PyDict_GetItemWithError +#define __Pyx_PyDict_GetItemStr PyDict_GetItem +#else +static CYTHON_INLINE PyObject * __Pyx_PyDict_GetItemStrWithError(PyObject *dict, PyObject *name) { +#if CYTHON_COMPILING_IN_PYPY + return PyDict_GetItem(dict, name); +#else + PyDictEntry *ep; + PyDictObject *mp = (PyDictObject*) dict; + long hash = ((PyStringObject *) name)->ob_shash; + assert(hash != -1); + ep = (mp->ma_lookup)(mp, name, hash); + if (ep == NULL) { + return NULL; + } + return ep->me_value; +#endif +} +#define __Pyx_PyDict_GetItemStr PyDict_GetItem +#endif +#if CYTHON_USE_TYPE_SLOTS + #define __Pyx_PyType_GetFlags(tp) (((PyTypeObject *)tp)->tp_flags) + #define __Pyx_PyType_HasFeature(type, feature) ((__Pyx_PyType_GetFlags(type) & (feature)) != 0) + #define __Pyx_PyObject_GetIterNextFunc(obj) (Py_TYPE(obj)->tp_iternext) +#else + #define __Pyx_PyType_GetFlags(tp) (PyType_GetFlags((PyTypeObject *)tp)) + #define __Pyx_PyType_HasFeature(type, feature) PyType_HasFeature(type, feature) + #define __Pyx_PyObject_GetIterNextFunc(obj) PyIter_Next +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + #define __Pyx_SetItemOnTypeDict(tp, k, v) PyObject_GenericSetAttr((PyObject*)tp, k, v) +#else + #define __Pyx_SetItemOnTypeDict(tp, k, v) PyDict_SetItem(tp->tp_dict, k, v) +#endif +#if CYTHON_USE_TYPE_SPECS && PY_VERSION_HEX >= 0x03080000 +#define __Pyx_PyHeapTypeObject_GC_Del(obj) {\ + PyTypeObject *type = Py_TYPE(obj);\ + assert(__Pyx_PyType_HasFeature(type, Py_TPFLAGS_HEAPTYPE));\ + PyObject_GC_Del(obj);\ + Py_DECREF(type);\ +} +#else +#define __Pyx_PyHeapTypeObject_GC_Del(obj) PyObject_GC_Del(obj) +#endif +#if CYTHON_COMPILING_IN_LIMITED_API + #define CYTHON_PEP393_ENABLED 1 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GetLength(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_ReadChar(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((void)u, 1114111U) + #define __Pyx_PyUnicode_KIND(u) ((void)u, (0)) + #define __Pyx_PyUnicode_DATA(u) ((void*)u) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)k, PyUnicode_ReadChar((PyObject*)(d), i)) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GetLength(u)) +#elif PY_VERSION_HEX > 0x03030000 && defined(PyUnicode_KIND) + #define CYTHON_PEP393_ENABLED 1 + #if PY_VERSION_HEX >= 0x030C0000 + #define __Pyx_PyUnicode_READY(op) (0) + #else + #define __Pyx_PyUnicode_READY(op) (likely(PyUnicode_IS_READY(op)) ?\ + 0 : _PyUnicode_Ready((PyObject *)(op))) + #endif + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_LENGTH(u) + #define 
__Pyx_PyUnicode_READ_CHAR(u, i) PyUnicode_READ_CHAR(u, i) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) PyUnicode_MAX_CHAR_VALUE(u) + #define __Pyx_PyUnicode_KIND(u) ((int)PyUnicode_KIND(u)) + #define __Pyx_PyUnicode_DATA(u) PyUnicode_DATA(u) + #define __Pyx_PyUnicode_READ(k, d, i) PyUnicode_READ(k, d, i) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) PyUnicode_WRITE(k, d, i, (Py_UCS4) ch) + #if PY_VERSION_HEX >= 0x030C0000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_LENGTH(u)) + #else + #if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x03090000 + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : ((PyCompactUnicodeObject *)(u))->wstr_length)) + #else + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != (likely(PyUnicode_IS_READY(u)) ? PyUnicode_GET_LENGTH(u) : PyUnicode_GET_SIZE(u))) + #endif + #endif +#else + #define CYTHON_PEP393_ENABLED 0 + #define PyUnicode_1BYTE_KIND 1 + #define PyUnicode_2BYTE_KIND 2 + #define PyUnicode_4BYTE_KIND 4 + #define __Pyx_PyUnicode_READY(op) (0) + #define __Pyx_PyUnicode_GET_LENGTH(u) PyUnicode_GET_SIZE(u) + #define __Pyx_PyUnicode_READ_CHAR(u, i) ((Py_UCS4)(PyUnicode_AS_UNICODE(u)[i])) + #define __Pyx_PyUnicode_MAX_CHAR_VALUE(u) ((sizeof(Py_UNICODE) == 2) ? 65535U : 1114111U) + #define __Pyx_PyUnicode_KIND(u) ((int)sizeof(Py_UNICODE)) + #define __Pyx_PyUnicode_DATA(u) ((void*)PyUnicode_AS_UNICODE(u)) + #define __Pyx_PyUnicode_READ(k, d, i) ((void)(k), (Py_UCS4)(((Py_UNICODE*)d)[i])) + #define __Pyx_PyUnicode_WRITE(k, d, i, ch) (((void)(k)), ((Py_UNICODE*)d)[i] = (Py_UNICODE) ch) + #define __Pyx_PyUnicode_IS_TRUE(u) (0 != PyUnicode_GET_SIZE(u)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyUnicode_Concat(a, b) PyNumber_Add(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) PyNumber_Add(a, b) +#else + #define __Pyx_PyUnicode_Concat(a, b) PyUnicode_Concat(a, b) + #define __Pyx_PyUnicode_ConcatSafe(a, b) ((unlikely((a) == Py_None) || unlikely((b) == Py_None)) ?\ + PyNumber_Add(a, b) : __Pyx_PyUnicode_Concat(a, b)) +#endif +#if CYTHON_COMPILING_IN_PYPY + #if !defined(PyUnicode_DecodeUnicodeEscape) + #define PyUnicode_DecodeUnicodeEscape(s, size, errors) PyUnicode_Decode(s, size, "unicode_escape", errors) + #endif + #if !defined(PyUnicode_Contains) || (PY_MAJOR_VERSION == 2 && PYPY_VERSION_NUM < 0x07030500) + #undef PyUnicode_Contains + #define PyUnicode_Contains(u, s) PySequence_Contains(u, s) + #endif + #if !defined(PyByteArray_Check) + #define PyByteArray_Check(obj) PyObject_TypeCheck(obj, &PyByteArray_Type) + #endif + #if !defined(PyObject_Format) + #define PyObject_Format(obj, fmt) PyObject_CallMethod(obj, "__format__", "O", fmt) + #endif +#endif +#define __Pyx_PyString_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyString_Check(b) && !PyString_CheckExact(b)))) ? PyNumber_Remainder(a, b) : __Pyx_PyString_Format(a, b)) +#define __Pyx_PyUnicode_FormatSafe(a, b) ((unlikely((a) == Py_None || (PyUnicode_Check(b) && !PyUnicode_CheckExact(b)))) ? 
PyNumber_Remainder(a, b) : PyUnicode_Format(a, b)) +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyString_Format(a, b) PyUnicode_Format(a, b) +#else + #define __Pyx_PyString_Format(a, b) PyString_Format(a, b) +#endif +#if PY_MAJOR_VERSION < 3 && !defined(PyObject_ASCII) + #define PyObject_ASCII(o) PyObject_Repr(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBaseString_Type PyUnicode_Type + #define PyStringObject PyUnicodeObject + #define PyString_Type PyUnicode_Type + #define PyString_Check PyUnicode_Check + #define PyString_CheckExact PyUnicode_CheckExact +#ifndef PyObject_Unicode + #define PyObject_Unicode PyObject_Str +#endif +#endif +#if PY_MAJOR_VERSION >= 3 + #define __Pyx_PyBaseString_Check(obj) PyUnicode_Check(obj) + #define __Pyx_PyBaseString_CheckExact(obj) PyUnicode_CheckExact(obj) +#else + #define __Pyx_PyBaseString_Check(obj) (PyString_Check(obj) || PyUnicode_Check(obj)) + #define __Pyx_PyBaseString_CheckExact(obj) (PyString_CheckExact(obj) || PyUnicode_CheckExact(obj)) +#endif +#if CYTHON_COMPILING_IN_CPYTHON + #define __Pyx_PySequence_ListKeepNew(obj)\ + (likely(PyList_CheckExact(obj) && Py_REFCNT(obj) == 1) ? __Pyx_NewRef(obj) : PySequence_List(obj)) +#else + #define __Pyx_PySequence_ListKeepNew(obj) PySequence_List(obj) +#endif +#ifndef PySet_CheckExact + #define PySet_CheckExact(obj) __Pyx_IS_TYPE(obj, &PySet_Type) +#endif +#if PY_VERSION_HEX >= 0x030900A4 + #define __Pyx_SET_REFCNT(obj, refcnt) Py_SET_REFCNT(obj, refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SET_SIZE(obj, size) +#else + #define __Pyx_SET_REFCNT(obj, refcnt) Py_REFCNT(obj) = (refcnt) + #define __Pyx_SET_SIZE(obj, size) Py_SIZE(obj) = (size) +#endif +#if CYTHON_ASSUME_SAFE_MACROS + #define __Pyx_PySequence_ITEM(o, i) PySequence_ITEM(o, i) + #define __Pyx_PySequence_SIZE(seq) Py_SIZE(seq) + #define __Pyx_PyTuple_SET_ITEM(o, i, v) (PyTuple_SET_ITEM(o, i, v), (0)) + #define __Pyx_PyList_SET_ITEM(o, i, v) (PyList_SET_ITEM(o, i, v), (0)) + #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_GET_SIZE(o) + #define __Pyx_PyList_GET_SIZE(o) PyList_GET_SIZE(o) + #define __Pyx_PySet_GET_SIZE(o) PySet_GET_SIZE(o) + #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o) + #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_GET_SIZE(o) +#else + #define __Pyx_PySequence_ITEM(o, i) PySequence_GetItem(o, i) + #define __Pyx_PySequence_SIZE(seq) PySequence_Size(seq) + #define __Pyx_PyTuple_SET_ITEM(o, i, v) PyTuple_SetItem(o, i, v) + #define __Pyx_PyList_SET_ITEM(o, i, v) PyList_SetItem(o, i, v) + #define __Pyx_PyTuple_GET_SIZE(o) PyTuple_Size(o) + #define __Pyx_PyList_GET_SIZE(o) PyList_Size(o) + #define __Pyx_PySet_GET_SIZE(o) PySet_Size(o) + #define __Pyx_PyBytes_GET_SIZE(o) PyBytes_Size(o) + #define __Pyx_PyByteArray_GET_SIZE(o) PyByteArray_Size(o) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyIntObject PyLongObject + #define PyInt_Type PyLong_Type + #define PyInt_Check(op) PyLong_Check(op) + #define PyInt_CheckExact(op) PyLong_CheckExact(op) + #define __Pyx_Py3Int_Check(op) PyLong_Check(op) + #define __Pyx_Py3Int_CheckExact(op) PyLong_CheckExact(op) + #define PyInt_FromString PyLong_FromString + #define PyInt_FromUnicode PyLong_FromUnicode + #define PyInt_FromLong PyLong_FromLong + #define PyInt_FromSize_t PyLong_FromSize_t + #define PyInt_FromSsize_t PyLong_FromSsize_t + #define PyInt_AsLong PyLong_AsLong + #define PyInt_AS_LONG PyLong_AS_LONG + #define PyInt_AsSsize_t PyLong_AsSsize_t + #define PyInt_AsUnsignedLongMask PyLong_AsUnsignedLongMask + #define PyInt_AsUnsignedLongLongMask PyLong_AsUnsignedLongLongMask + #define 
PyNumber_Int PyNumber_Long +#else + #define __Pyx_Py3Int_Check(op) (PyLong_Check(op) || PyInt_Check(op)) + #define __Pyx_Py3Int_CheckExact(op) (PyLong_CheckExact(op) || PyInt_CheckExact(op)) +#endif +#if PY_MAJOR_VERSION >= 3 + #define PyBoolObject PyLongObject +#endif +#if PY_MAJOR_VERSION >= 3 && CYTHON_COMPILING_IN_PYPY + #ifndef PyUnicode_InternFromString + #define PyUnicode_InternFromString(s) PyUnicode_FromString(s) + #endif +#endif +#if PY_VERSION_HEX < 0x030200A4 + typedef long Py_hash_t; + #define __Pyx_PyInt_FromHash_t PyInt_FromLong + #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsHash_t +#else + #define __Pyx_PyInt_FromHash_t PyInt_FromSsize_t + #define __Pyx_PyInt_AsHash_t __Pyx_PyIndex_AsSsize_t +#endif +#if CYTHON_USE_ASYNC_SLOTS + #if PY_VERSION_HEX >= 0x030500B1 + #define __Pyx_PyAsyncMethodsStruct PyAsyncMethods + #define __Pyx_PyType_AsAsync(obj) (Py_TYPE(obj)->tp_as_async) + #else + #define __Pyx_PyType_AsAsync(obj) ((__Pyx_PyAsyncMethodsStruct*) (Py_TYPE(obj)->tp_reserved)) + #endif +#else + #define __Pyx_PyType_AsAsync(obj) NULL +#endif +#ifndef __Pyx_PyAsyncMethodsStruct + typedef struct { + unaryfunc am_await; + unaryfunc am_aiter; + unaryfunc am_anext; + } __Pyx_PyAsyncMethodsStruct; +#endif + +#if defined(_WIN32) || defined(WIN32) || defined(MS_WINDOWS) + #if !defined(_USE_MATH_DEFINES) + #define _USE_MATH_DEFINES + #endif +#endif +#include +#ifdef NAN +#define __PYX_NAN() ((float) NAN) +#else +static CYTHON_INLINE float __PYX_NAN() { + float value; + memset(&value, 0xFF, sizeof(value)); + return value; +} +#endif +#if defined(__CYGWIN__) && defined(_LDBL_EQ_DBL) +#define __Pyx_truncl trunc +#else +#define __Pyx_truncl truncl +#endif + +#define __PYX_MARK_ERR_POS(f_index, lineno) \ + { __pyx_filename = __pyx_f[f_index]; (void)__pyx_filename; __pyx_lineno = lineno; (void)__pyx_lineno; __pyx_clineno = __LINE__; (void)__pyx_clineno; } +#define __PYX_ERR(f_index, lineno, Ln_error) \ + { __PYX_MARK_ERR_POS(f_index, lineno) goto Ln_error; } + +#ifdef CYTHON_EXTERN_C + #undef __PYX_EXTERN_C + #define __PYX_EXTERN_C CYTHON_EXTERN_C +#elif defined(__PYX_EXTERN_C) + #ifdef _MSC_VER + #pragma message ("Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead.") + #else + #warning Please do not define the '__PYX_EXTERN_C' macro externally. Use 'CYTHON_EXTERN_C' instead. 
+ #endif +#else + #ifdef __cplusplus + #define __PYX_EXTERN_C extern "C" + #else + #define __PYX_EXTERN_C extern + #endif +#endif + +#define __PYX_HAVE__box_intersection +#define __PYX_HAVE_API__box_intersection +/* Early includes */ +#include +#include + + /* Using NumPy API declarations from "numpy/__init__.cython-30.pxd" */ + +#include "numpy/arrayobject.h" +#include "numpy/ndarrayobject.h" +#include "numpy/ndarraytypes.h" +#include "numpy/arrayscalars.h" +#include "numpy/ufuncobject.h" +#include "pythread.h" +#include +#ifdef _OPENMP +#include +#endif /* _OPENMP */ + +#if defined(PYREX_WITHOUT_ASSERTIONS) && !defined(CYTHON_WITHOUT_ASSERTIONS) +#define CYTHON_WITHOUT_ASSERTIONS +#endif + +typedef struct {PyObject **p; const char *s; const Py_ssize_t n; const char* encoding; + const char is_unicode; const char is_str; const char intern; } __Pyx_StringTabEntry; + +#define __PYX_DEFAULT_STRING_ENCODING_IS_ASCII 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_UTF8 0 +#define __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT (PY_MAJOR_VERSION >= 3 && __PYX_DEFAULT_STRING_ENCODING_IS_UTF8) +#define __PYX_DEFAULT_STRING_ENCODING "" +#define __Pyx_PyObject_FromString __Pyx_PyBytes_FromString +#define __Pyx_PyObject_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#define __Pyx_uchar_cast(c) ((unsigned char)c) +#define __Pyx_long_cast(x) ((long)x) +#define __Pyx_fits_Py_ssize_t(v, type, is_signed) (\ + (sizeof(type) < sizeof(Py_ssize_t)) ||\ + (sizeof(type) > sizeof(Py_ssize_t) &&\ + likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX) &&\ + (!is_signed || likely(v > (type)PY_SSIZE_T_MIN ||\ + v == (type)PY_SSIZE_T_MIN))) ||\ + (sizeof(type) == sizeof(Py_ssize_t) &&\ + (is_signed || likely(v < (type)PY_SSIZE_T_MAX ||\ + v == (type)PY_SSIZE_T_MAX))) ) +static CYTHON_INLINE int __Pyx_is_valid_index(Py_ssize_t i, Py_ssize_t limit) { + return (size_t) i < (size_t) limit; +} +#if defined (__cplusplus) && __cplusplus >= 201103L + #include + #define __Pyx_sst_abs(value) std::abs(value) +#elif SIZEOF_INT >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) abs(value) +#elif SIZEOF_LONG >= SIZEOF_SIZE_T + #define __Pyx_sst_abs(value) labs(value) +#elif defined (_MSC_VER) + #define __Pyx_sst_abs(value) ((Py_ssize_t)_abs64(value)) +#elif defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L + #define __Pyx_sst_abs(value) llabs(value) +#elif defined (__GNUC__) + #define __Pyx_sst_abs(value) __builtin_llabs(value) +#else + #define __Pyx_sst_abs(value) ((value<0) ? 
-value : value) +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject*); +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject*, Py_ssize_t* length); +#define __Pyx_PyByteArray_FromString(s) PyByteArray_FromStringAndSize((const char*)s, strlen((const char*)s)) +#define __Pyx_PyByteArray_FromStringAndSize(s, l) PyByteArray_FromStringAndSize((const char*)s, l) +#define __Pyx_PyBytes_FromString PyBytes_FromString +#define __Pyx_PyBytes_FromStringAndSize PyBytes_FromStringAndSize +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char*); +#if PY_MAJOR_VERSION < 3 + #define __Pyx_PyStr_FromString __Pyx_PyBytes_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyBytes_FromStringAndSize +#else + #define __Pyx_PyStr_FromString __Pyx_PyUnicode_FromString + #define __Pyx_PyStr_FromStringAndSize __Pyx_PyUnicode_FromStringAndSize +#endif +#define __Pyx_PyBytes_AsWritableString(s) ((char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableSString(s) ((signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsWritableUString(s) ((unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsString(s) ((const char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsSString(s) ((const signed char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyBytes_AsUString(s) ((const unsigned char*) PyBytes_AS_STRING(s)) +#define __Pyx_PyObject_AsWritableString(s) ((char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableSString(s) ((signed char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsWritableUString(s) ((unsigned char*)(__pyx_uintptr_t) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsSString(s) ((const signed char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_AsUString(s) ((const unsigned char*) __Pyx_PyObject_AsString(s)) +#define __Pyx_PyObject_FromCString(s) __Pyx_PyObject_FromString((const char*)s) +#define __Pyx_PyBytes_FromCString(s) __Pyx_PyBytes_FromString((const char*)s) +#define __Pyx_PyByteArray_FromCString(s) __Pyx_PyByteArray_FromString((const char*)s) +#define __Pyx_PyStr_FromCString(s) __Pyx_PyStr_FromString((const char*)s) +#define __Pyx_PyUnicode_FromCString(s) __Pyx_PyUnicode_FromString((const char*)s) +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const wchar_t *u) +{ + const wchar_t *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#else +static CYTHON_INLINE size_t __Pyx_Py_UNICODE_strlen(const Py_UNICODE *u) +{ + const Py_UNICODE *u_end = u; + while (*u_end++) ; + return (size_t)(u_end - u - 1); +} +#endif +#define __Pyx_PyUnicode_FromOrdinal(o) PyUnicode_FromOrdinal((int)o) +#define __Pyx_PyUnicode_FromUnicode(u) PyUnicode_FromUnicode(u, __Pyx_Py_UNICODE_strlen(u)) +#define __Pyx_PyUnicode_FromUnicodeAndLength PyUnicode_FromUnicode +#define __Pyx_PyUnicode_AsUnicode PyUnicode_AsUnicode +#define __Pyx_NewRef(obj) (Py_INCREF(obj), obj) +#define __Pyx_Owned_Py_None(b) __Pyx_NewRef(Py_None) +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b); +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject*); +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject*); +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x); +#define __Pyx_PySequence_Tuple(obj)\ + (likely(PyTuple_CheckExact(obj)) ? 
__Pyx_NewRef(obj) : PySequence_Tuple(obj)) +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject*); +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t); +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject*); +#if CYTHON_ASSUME_SAFE_MACROS +#define __pyx_PyFloat_AsDouble(x) (PyFloat_CheckExact(x) ? PyFloat_AS_DOUBLE(x) : PyFloat_AsDouble(x)) +#else +#define __pyx_PyFloat_AsDouble(x) PyFloat_AsDouble(x) +#endif +#define __pyx_PyFloat_AsFloat(x) ((float) __pyx_PyFloat_AsDouble(x)) +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyNumber_Int(x) (PyLong_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Long(x)) +#else +#define __Pyx_PyNumber_Int(x) (PyInt_CheckExact(x) ? __Pyx_NewRef(x) : PyNumber_Int(x)) +#endif +#if CYTHON_USE_PYLONG_INTERNALS + #if PY_VERSION_HEX >= 0x030C00A7 + #ifndef _PyLong_SIGN_MASK + #define _PyLong_SIGN_MASK 3 + #endif + #ifndef _PyLong_NON_SIZE_BITS + #define _PyLong_NON_SIZE_BITS 3 + #endif + #define __Pyx_PyLong_Sign(x) (((PyLongObject*)x)->long_value.lv_tag & _PyLong_SIGN_MASK) + #define __Pyx_PyLong_IsNeg(x) ((__Pyx_PyLong_Sign(x) & 2) != 0) + #define __Pyx_PyLong_IsNonNeg(x) (!__Pyx_PyLong_IsNeg(x)) + #define __Pyx_PyLong_IsZero(x) (__Pyx_PyLong_Sign(x) & 1) + #define __Pyx_PyLong_IsPos(x) (__Pyx_PyLong_Sign(x) == 0) + #define __Pyx_PyLong_CompactValueUnsigned(x) (__Pyx_PyLong_Digits(x)[0]) + #define __Pyx_PyLong_DigitCount(x) ((Py_ssize_t) (((PyLongObject*)x)->long_value.lv_tag >> _PyLong_NON_SIZE_BITS)) + #define __Pyx_PyLong_SignedDigitCount(x)\ + ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * __Pyx_PyLong_DigitCount(x)) + #if defined(PyUnstable_Long_IsCompact) && defined(PyUnstable_Long_CompactValue) + #define __Pyx_PyLong_IsCompact(x) PyUnstable_Long_IsCompact((PyLongObject*) x) + #define __Pyx_PyLong_CompactValue(x) PyUnstable_Long_CompactValue((PyLongObject*) x) + #else + #define __Pyx_PyLong_IsCompact(x) (((PyLongObject*)x)->long_value.lv_tag < (2 << _PyLong_NON_SIZE_BITS)) + #define __Pyx_PyLong_CompactValue(x) ((1 - (Py_ssize_t) __Pyx_PyLong_Sign(x)) * (Py_ssize_t) __Pyx_PyLong_Digits(x)[0]) + #endif + typedef Py_ssize_t __Pyx_compact_pylong; + typedef size_t __Pyx_compact_upylong; + #else // Py < 3.12 + #define __Pyx_PyLong_IsNeg(x) (Py_SIZE(x) < 0) + #define __Pyx_PyLong_IsNonNeg(x) (Py_SIZE(x) >= 0) + #define __Pyx_PyLong_IsZero(x) (Py_SIZE(x) == 0) + #define __Pyx_PyLong_IsPos(x) (Py_SIZE(x) > 0) + #define __Pyx_PyLong_CompactValueUnsigned(x) ((Py_SIZE(x) == 0) ? 0 : __Pyx_PyLong_Digits(x)[0]) + #define __Pyx_PyLong_DigitCount(x) __Pyx_sst_abs(Py_SIZE(x)) + #define __Pyx_PyLong_SignedDigitCount(x) Py_SIZE(x) + #define __Pyx_PyLong_IsCompact(x) (Py_SIZE(x) == 0 || Py_SIZE(x) == 1 || Py_SIZE(x) == -1) + #define __Pyx_PyLong_CompactValue(x)\ + ((Py_SIZE(x) == 0) ? (sdigit) 0 : ((Py_SIZE(x) < 0) ? 
-(sdigit)__Pyx_PyLong_Digits(x)[0] : (sdigit)__Pyx_PyLong_Digits(x)[0])) + typedef sdigit __Pyx_compact_pylong; + typedef digit __Pyx_compact_upylong; + #endif + #if PY_VERSION_HEX >= 0x030C00A5 + #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->long_value.ob_digit) + #else + #define __Pyx_PyLong_Digits(x) (((PyLongObject*)x)->ob_digit) + #endif +#endif +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII +static int __Pyx_sys_getdefaultencoding_not_ascii; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + PyObject* ascii_chars_u = NULL; + PyObject* ascii_chars_b = NULL; + const char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + if (strcmp(default_encoding_c, "ascii") == 0) { + __Pyx_sys_getdefaultencoding_not_ascii = 0; + } else { + char ascii_chars[128]; + int c; + for (c = 0; c < 128; c++) { + ascii_chars[c] = (char) c; + } + __Pyx_sys_getdefaultencoding_not_ascii = 1; + ascii_chars_u = PyUnicode_DecodeASCII(ascii_chars, 128, NULL); + if (!ascii_chars_u) goto bad; + ascii_chars_b = PyUnicode_AsEncodedString(ascii_chars_u, default_encoding_c, NULL); + if (!ascii_chars_b || !PyBytes_Check(ascii_chars_b) || memcmp(ascii_chars, PyBytes_AS_STRING(ascii_chars_b), 128) != 0) { + PyErr_Format( + PyExc_ValueError, + "This module compiled with c_string_encoding=ascii, but default encoding '%.200s' is not a superset of ascii.", + default_encoding_c); + goto bad; + } + Py_DECREF(ascii_chars_u); + Py_DECREF(ascii_chars_b); + } + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + Py_XDECREF(ascii_chars_u); + Py_XDECREF(ascii_chars_b); + return -1; +} +#endif +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT && PY_MAJOR_VERSION >= 3 +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_DecodeUTF8(c_str, size, NULL) +#else +#define __Pyx_PyUnicode_FromStringAndSize(c_str, size) PyUnicode_Decode(c_str, size, __PYX_DEFAULT_STRING_ENCODING, NULL) +#if __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +static char* __PYX_DEFAULT_STRING_ENCODING; +static int __Pyx_init_sys_getdefaultencoding_params(void) { + PyObject* sys; + PyObject* default_encoding = NULL; + char* default_encoding_c; + sys = PyImport_ImportModule("sys"); + if (!sys) goto bad; + default_encoding = PyObject_CallMethod(sys, (char*) (const char*) "getdefaultencoding", NULL); + Py_DECREF(sys); + if (!default_encoding) goto bad; + default_encoding_c = PyBytes_AsString(default_encoding); + if (!default_encoding_c) goto bad; + __PYX_DEFAULT_STRING_ENCODING = (char*) malloc(strlen(default_encoding_c) + 1); + if (!__PYX_DEFAULT_STRING_ENCODING) goto bad; + strcpy(__PYX_DEFAULT_STRING_ENCODING, default_encoding_c); + Py_DECREF(default_encoding); + return 0; +bad: + Py_XDECREF(default_encoding); + return -1; +} +#endif +#endif + + +/* Test for GCC > 2.95 */ +#if defined(__GNUC__) && (__GNUC__ > 2 || (__GNUC__ == 2 && (__GNUC_MINOR__ > 95))) + #define likely(x) __builtin_expect(!!(x), 1) + #define unlikely(x) __builtin_expect(!!(x), 0) +#else /* !__GNUC__ or GCC < 2.95 */ + #define likely(x) (x) + #define unlikely(x) (x) +#endif /* __GNUC__ */ +static CYTHON_INLINE void __Pyx_pretend_to_initialize(void* ptr) { (void)ptr; } + +#if !CYTHON_USE_MODULE_STATE +static PyObject 
*__pyx_m = NULL;
+#endif
+static int __pyx_lineno;
+static int __pyx_clineno = 0;
+static const char * __pyx_cfilenm = __FILE__;
+static const char *__pyx_filename;
+
+/* Header.proto */
+#if !defined(CYTHON_CCOMPLEX)
+  #if defined(__cplusplus)
+    #define CYTHON_CCOMPLEX 1
+  #elif (defined(_Complex_I) && !defined(_MSC_VER)) || ((defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) && !defined(__STDC_NO_COMPLEX__))
+    #define CYTHON_CCOMPLEX 1
+  #else
+    #define CYTHON_CCOMPLEX 0
+  #endif
+#endif
+#if CYTHON_CCOMPLEX
+  #ifdef __cplusplus
+    #include <complex>
+  #else
+    #include <complex.h>
+  #endif
+#endif
+#if CYTHON_CCOMPLEX && !defined(__cplusplus) && defined(__sun__) && defined(__GNUC__)
+  #undef _Complex_I
+  #define _Complex_I 1.0fj
+#endif
+
+/* #### Code section: filename_table ### */
+
+static const char *__pyx_f[] = {
+  "box_intersection.pyx",
+  "<stringsource>",
+  "__init__.cython-30.pxd",
+  "type.pxd",
+};
+/* #### Code section: utility_code_proto_before_types ### */
+/* ForceInitThreads.proto */
+#ifndef __PYX_FORCE_INIT_THREADS
+  #define __PYX_FORCE_INIT_THREADS 0
+#endif
+
+/* NoFastGil.proto */
+#define __Pyx_PyGILState_Ensure PyGILState_Ensure
+#define __Pyx_PyGILState_Release PyGILState_Release
+#define __Pyx_FastGIL_Remember()
+#define __Pyx_FastGIL_Forget()
+#define __Pyx_FastGilFuncInit()
+
+/* BufferFormatStructs.proto */
+struct __Pyx_StructField_;
+#define __PYX_BUF_FLAGS_PACKED_STRUCT (1 << 0)
+typedef struct {
+  const char* name;
+  struct __Pyx_StructField_* fields;
+  size_t size;
+  size_t arraysize[8];
+  int ndim;
+  char typegroup;
+  char is_unsigned;
+  int flags;
+} __Pyx_TypeInfo;
+typedef struct __Pyx_StructField_ {
+  __Pyx_TypeInfo* type;
+  const char* name;
+  size_t offset;
+} __Pyx_StructField;
+typedef struct {
+  __Pyx_StructField* field;
+  size_t parent_offset;
+} __Pyx_BufFmt_StackElem;
+typedef struct {
+  __Pyx_StructField root;
+  __Pyx_BufFmt_StackElem* head;
+  size_t fmt_offset;
+  size_t new_count, enc_count;
+  size_t struct_alignment;
+  int is_complex;
+  char enc_type;
+  char new_packmode;
+  char enc_packmode;
+  char is_valid_array;
+} __Pyx_BufFmt_Context;
+
+/* Atomics.proto */
+#include <pythread.h>
+#ifndef CYTHON_ATOMICS
+    #define CYTHON_ATOMICS 1
+#endif
+#define __PYX_CYTHON_ATOMICS_ENABLED() CYTHON_ATOMICS
+#define __pyx_atomic_int_type int
+#define __pyx_nonatomic_int_type int
+#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
+                        (__STDC_VERSION__ >= 201112L) &&\
+                        !defined(__STDC_NO_ATOMICS__))
+    #include <stdatomic.h>
+#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
+                    (__cplusplus >= 201103L) ||\
+                    (defined(_MSC_VER) && _MSC_VER >= 1700)))
+    #include <atomic>
+#endif
+#if CYTHON_ATOMICS && (defined(__STDC_VERSION__) &&\
+                        (__STDC_VERSION__ >= 201112L) &&\
+                        !defined(__STDC_NO_ATOMICS__) &&\
+                        ATOMIC_INT_LOCK_FREE == 2)
+    #undef __pyx_atomic_int_type
+    #define __pyx_atomic_int_type atomic_int
+    #define __pyx_atomic_incr_aligned(value) atomic_fetch_add_explicit(value, 1, memory_order_relaxed)
+    #define __pyx_atomic_decr_aligned(value) atomic_fetch_sub_explicit(value, 1, memory_order_acq_rel)
+    #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
+        #pragma message ("Using standard C atomics")
+    #elif defined(__PYX_DEBUG_ATOMICS)
+        #warning "Using standard C atomics"
+    #endif
+#elif CYTHON_ATOMICS && (defined(__cplusplus) && (\
+                    (__cplusplus >= 201103L) ||\
+\
+                    (defined(_MSC_VER) && _MSC_VER >= 1700)) &&\
+                    ATOMIC_INT_LOCK_FREE == 2)
+    #undef __pyx_atomic_int_type
+    #define __pyx_atomic_int_type std::atomic_int
+    #define __pyx_atomic_incr_aligned(value) std::atomic_fetch_add_explicit(value, 1, std::memory_order_relaxed)
+    #define __pyx_atomic_decr_aligned(value) std::atomic_fetch_sub_explicit(value, 1, std::memory_order_acq_rel)
+    #if defined(__PYX_DEBUG_ATOMICS) && defined(_MSC_VER)
+        #pragma message ("Using standard C++ atomics")
+    #elif defined(__PYX_DEBUG_ATOMICS)
+        #warning "Using standard C++ atomics"
+    #endif
+#elif CYTHON_ATOMICS && (__GNUC__ >= 5 || (__GNUC__ == 4 &&\
+                    (__GNUC_MINOR__ > 1 ||\
+                    (__GNUC_MINOR__ == 1 && __GNUC_PATCHLEVEL__ >= 2))))
+    #define __pyx_atomic_incr_aligned(value) __sync_fetch_and_add(value, 1)
+    #define __pyx_atomic_decr_aligned(value) __sync_fetch_and_sub(value, 1)
+    #ifdef __PYX_DEBUG_ATOMICS
+        #warning "Using GNU atomics"
+    #endif
+#elif CYTHON_ATOMICS && defined(_MSC_VER)
+    #include <intrin.h>
+    #undef __pyx_atomic_int_type
+    #define __pyx_atomic_int_type long
+    #define __pyx_nonatomic_int_type long
+    #pragma intrinsic (_InterlockedExchangeAdd)
+    #define __pyx_atomic_incr_aligned(value) _InterlockedExchangeAdd(value, 1)
+    #define __pyx_atomic_decr_aligned(value) _InterlockedExchangeAdd(value, -1)
+    #ifdef __PYX_DEBUG_ATOMICS
+        #pragma message ("Using MSVC atomics")
+    #endif
+#else
+    #undef CYTHON_ATOMICS
+    #define CYTHON_ATOMICS 0
+    #ifdef __PYX_DEBUG_ATOMICS
+        #warning "Not using atomics"
+    #endif
+#endif
+#if CYTHON_ATOMICS
+    #define __pyx_add_acquisition_count(memview)\
+             __pyx_atomic_incr_aligned(__pyx_get_slice_count_pointer(memview))
+    #define __pyx_sub_acquisition_count(memview)\
+            __pyx_atomic_decr_aligned(__pyx_get_slice_count_pointer(memview))
+#else
+    #define __pyx_add_acquisition_count(memview)\
+             __pyx_add_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
+    #define __pyx_sub_acquisition_count(memview)\
+            __pyx_sub_acquisition_count_locked(__pyx_get_slice_count_pointer(memview), memview->lock)
+#endif
+
+/* MemviewSliceStruct.proto */
+struct __pyx_memoryview_obj;
+typedef struct {
+  struct __pyx_memoryview_obj *memview;
+  char *data;
+  Py_ssize_t shape[8];
+  Py_ssize_t strides[8];
+  Py_ssize_t suboffsets[8];
+} __Pyx_memviewslice;
+#define __Pyx_MemoryView_Len(m) (m.shape[0])
+
+/* #### Code section: numeric_typedefs ### */
+
+/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":731
+ * # in Cython to enable them only on the right systems.
+ * + * ctypedef npy_int8 int8_t # <<<<<<<<<<<<<< + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + */ +typedef npy_int8 __pyx_t_5numpy_int8_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":732 + * + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t # <<<<<<<<<<<<<< + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t + */ +typedef npy_int16 __pyx_t_5numpy_int16_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":733 + * ctypedef npy_int8 int8_t + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t # <<<<<<<<<<<<<< + * ctypedef npy_int64 int64_t + * #ctypedef npy_int96 int96_t + */ +typedef npy_int32 __pyx_t_5numpy_int32_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":734 + * ctypedef npy_int16 int16_t + * ctypedef npy_int32 int32_t + * ctypedef npy_int64 int64_t # <<<<<<<<<<<<<< + * #ctypedef npy_int96 int96_t + * #ctypedef npy_int128 int128_t + */ +typedef npy_int64 __pyx_t_5numpy_int64_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":738 + * #ctypedef npy_int128 int128_t + * + * ctypedef npy_uint8 uint8_t # <<<<<<<<<<<<<< + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + */ +typedef npy_uint8 __pyx_t_5numpy_uint8_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":739 + * + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t # <<<<<<<<<<<<<< + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t + */ +typedef npy_uint16 __pyx_t_5numpy_uint16_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":740 + * ctypedef npy_uint8 uint8_t + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t # <<<<<<<<<<<<<< + * ctypedef npy_uint64 uint64_t + * #ctypedef npy_uint96 uint96_t + */ +typedef npy_uint32 __pyx_t_5numpy_uint32_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":741 + * ctypedef npy_uint16 uint16_t + * ctypedef npy_uint32 uint32_t + * ctypedef npy_uint64 uint64_t # <<<<<<<<<<<<<< + * #ctypedef npy_uint96 uint96_t + * #ctypedef npy_uint128 uint128_t + */ +typedef npy_uint64 __pyx_t_5numpy_uint64_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":745 + * #ctypedef npy_uint128 uint128_t + * + * ctypedef npy_float32 float32_t # <<<<<<<<<<<<<< + * ctypedef npy_float64 float64_t + * #ctypedef npy_float80 float80_t + */ +typedef npy_float32 __pyx_t_5numpy_float32_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":746 + * + * ctypedef npy_float32 float32_t + * ctypedef npy_float64 float64_t # <<<<<<<<<<<<<< + * #ctypedef npy_float80 float80_t + * #ctypedef npy_float128 float128_t + */ +typedef npy_float64 __pyx_t_5numpy_float64_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":755 + * # The int types are mapped a bit surprising -- + * # numpy.int corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t + */ +typedef npy_long __pyx_t_5numpy_int_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":756 + * # numpy.int 
corresponds to 'l' and numpy.long to 'q' + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t # <<<<<<<<<<<<<< + * ctypedef npy_longlong longlong_t + * + */ +typedef npy_longlong __pyx_t_5numpy_long_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":757 + * ctypedef npy_long int_t + * ctypedef npy_longlong long_t + * ctypedef npy_longlong longlong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_ulong uint_t + */ +typedef npy_longlong __pyx_t_5numpy_longlong_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":759 + * ctypedef npy_longlong longlong_t + * + * ctypedef npy_ulong uint_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t + */ +typedef npy_ulong __pyx_t_5numpy_uint_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":760 + * + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t # <<<<<<<<<<<<<< + * ctypedef npy_ulonglong ulonglong_t + * + */ +typedef npy_ulonglong __pyx_t_5numpy_ulong_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":761 + * ctypedef npy_ulong uint_t + * ctypedef npy_ulonglong ulong_t + * ctypedef npy_ulonglong ulonglong_t # <<<<<<<<<<<<<< + * + * ctypedef npy_intp intp_t + */ +typedef npy_ulonglong __pyx_t_5numpy_ulonglong_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":763 + * ctypedef npy_ulonglong ulonglong_t + * + * ctypedef npy_intp intp_t # <<<<<<<<<<<<<< + * ctypedef npy_uintp uintp_t + * + */ +typedef npy_intp __pyx_t_5numpy_intp_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":764 + * + * ctypedef npy_intp intp_t + * ctypedef npy_uintp uintp_t # <<<<<<<<<<<<<< + * + * ctypedef npy_double float_t + */ +typedef npy_uintp __pyx_t_5numpy_uintp_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":766 + * ctypedef npy_uintp uintp_t + * + * ctypedef npy_double float_t # <<<<<<<<<<<<<< + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t + */ +typedef npy_double __pyx_t_5numpy_float_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":767 + * + * ctypedef npy_double float_t + * ctypedef npy_double double_t # <<<<<<<<<<<<<< + * ctypedef npy_longdouble longdouble_t + * + */ +typedef npy_double __pyx_t_5numpy_double_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":768 + * ctypedef npy_double float_t + * ctypedef npy_double double_t + * ctypedef npy_longdouble longdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cfloat cfloat_t + */ +typedef npy_longdouble __pyx_t_5numpy_longdouble_t; +/* #### Code section: complex_type_declarations ### */ +/* Declarations.proto */ +#if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #ifdef __cplusplus + typedef ::std::complex< float > __pyx_t_float_complex; + #else + typedef float _Complex __pyx_t_float_complex; + #endif +#else + typedef struct { float real, imag; } __pyx_t_float_complex; +#endif +static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float, float); + +/* Declarations.proto */ +#if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #ifdef __cplusplus + typedef ::std::complex< double > __pyx_t_double_complex; + #else + typedef double _Complex 
__pyx_t_double_complex; + #endif +#else + typedef struct { double real, imag; } __pyx_t_double_complex; +#endif +static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double, double); + +/* #### Code section: type_declarations ### */ + +/*--- Type declarations ---*/ +struct __pyx_array_obj; +struct __pyx_MemviewEnum_obj; +struct __pyx_memoryview_obj; +struct __pyx_memoryviewslice_obj; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":770 + * ctypedef npy_longdouble longdouble_t + * + * ctypedef npy_cfloat cfloat_t # <<<<<<<<<<<<<< + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t + */ +typedef npy_cfloat __pyx_t_5numpy_cfloat_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":771 + * + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t # <<<<<<<<<<<<<< + * ctypedef npy_clongdouble clongdouble_t + * + */ +typedef npy_cdouble __pyx_t_5numpy_cdouble_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":772 + * ctypedef npy_cfloat cfloat_t + * ctypedef npy_cdouble cdouble_t + * ctypedef npy_clongdouble clongdouble_t # <<<<<<<<<<<<<< + * + * ctypedef npy_cdouble complex_t + */ +typedef npy_clongdouble __pyx_t_5numpy_clongdouble_t; + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":774 + * ctypedef npy_clongdouble clongdouble_t + * + * ctypedef npy_cdouble complex_t # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew1(a): + */ +typedef npy_cdouble __pyx_t_5numpy_complex_t; + +/* "View.MemoryView":114 + * @cython.collection_type("sequence") + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ +struct __pyx_array_obj { + PyObject_HEAD + struct __pyx_vtabstruct_array *__pyx_vtab; + char *data; + Py_ssize_t len; + char *format; + int ndim; + Py_ssize_t *_shape; + Py_ssize_t *_strides; + Py_ssize_t itemsize; + PyObject *mode; + PyObject *_format; + void (*callback_free_data)(void *); + int free_data; + int dtype_is_object; +}; + + +/* "View.MemoryView":302 + * + * @cname('__pyx_MemviewEnum') + * cdef class Enum(object): # <<<<<<<<<<<<<< + * cdef object name + * def __init__(self, name): + */ +struct __pyx_MemviewEnum_obj { + PyObject_HEAD + PyObject *name; +}; + + +/* "View.MemoryView":337 + * + * @cname('__pyx_memoryview') + * cdef class memoryview: # <<<<<<<<<<<<<< + * + * cdef object obj + */ +struct __pyx_memoryview_obj { + PyObject_HEAD + struct __pyx_vtabstruct_memoryview *__pyx_vtab; + PyObject *obj; + PyObject *_size; + PyObject *_array_interface; + PyThread_type_lock lock; + __pyx_atomic_int_type acquisition_count; + Py_buffer view; + int flags; + int dtype_is_object; + __Pyx_TypeInfo *typeinfo; +}; + + +/* "View.MemoryView":952 + * @cython.collection_type("sequence") + * @cname('__pyx_memoryviewslice') + * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<< + * "Internal class for passing memoryview slices to Python" + * + */ +struct __pyx_memoryviewslice_obj { + struct __pyx_memoryview_obj __pyx_base; + __Pyx_memviewslice from_slice; + PyObject *from_object; + PyObject *(*to_object_func)(char *); + int (*to_dtype_func)(char *, PyObject *); +}; + + + +/* "View.MemoryView":114 + * @cython.collection_type("sequence") + * @cname("__pyx_array") + * cdef class array: # <<<<<<<<<<<<<< + * + * cdef: + */ + +struct __pyx_vtabstruct_array { + PyObject *(*get_memview)(struct 
__pyx_array_obj *);
+};
+static struct __pyx_vtabstruct_array *__pyx_vtabptr_array;
+
+
+/* "View.MemoryView":337
+ *
+ * @cname('__pyx_memoryview')
+ * cdef class memoryview: # <<<<<<<<<<<<<<
+ *
+ * cdef object obj
+ */
+
+struct __pyx_vtabstruct_memoryview {
+  char *(*get_item_pointer)(struct __pyx_memoryview_obj *, PyObject *);
+  PyObject *(*is_slice)(struct __pyx_memoryview_obj *, PyObject *);
+  PyObject *(*setitem_slice_assignment)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
+  PyObject *(*setitem_slice_assign_scalar)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *);
+  PyObject *(*setitem_indexed)(struct __pyx_memoryview_obj *, PyObject *, PyObject *);
+  PyObject *(*convert_item_to_object)(struct __pyx_memoryview_obj *, char *);
+  PyObject *(*assign_item_from_object)(struct __pyx_memoryview_obj *, char *, PyObject *);
+  PyObject *(*_get_base)(struct __pyx_memoryview_obj *);
+};
+static struct __pyx_vtabstruct_memoryview *__pyx_vtabptr_memoryview;
+
+
+/* "View.MemoryView":952
+ * @cython.collection_type("sequence")
+ * @cname('__pyx_memoryviewslice')
+ * cdef class _memoryviewslice(memoryview): # <<<<<<<<<<<<<<
+ * "Internal class for passing memoryview slices to Python"
+ *
+ */
+
+struct __pyx_vtabstruct__memoryviewslice {
+  struct __pyx_vtabstruct_memoryview __pyx_base;
+};
+static struct __pyx_vtabstruct__memoryviewslice *__pyx_vtabptr__memoryviewslice;
+/* #### Code section: utility_code_proto ### */
+
+/* --- Runtime support code (head) --- */
+/* Refnanny.proto */
+#ifndef CYTHON_REFNANNY
+  #define CYTHON_REFNANNY 0
+#endif
+#if CYTHON_REFNANNY
+  typedef struct {
+    void (*INCREF)(void*, PyObject*, Py_ssize_t);
+    void (*DECREF)(void*, PyObject*, Py_ssize_t);
+    void (*GOTREF)(void*, PyObject*, Py_ssize_t);
+    void (*GIVEREF)(void*, PyObject*, Py_ssize_t);
+    void* (*SetupContext)(const char*, Py_ssize_t, const char*);
+    void (*FinishContext)(void**);
+  } __Pyx_RefNannyAPIStruct;
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNanny = NULL;
+  static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname);
+  #define __Pyx_RefNannyDeclarations void *__pyx_refnanny = NULL;
+#ifdef WITH_THREAD
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+          if (acquire_gil) {\
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
+              PyGILState_Release(__pyx_gilstate_save);\
+          } else {\
+              __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__));\
+          }
+  #define __Pyx_RefNannyFinishContextNogil() {\
+              PyGILState_STATE __pyx_gilstate_save = PyGILState_Ensure();\
+              __Pyx_RefNannyFinishContext();\
+              PyGILState_Release(__pyx_gilstate_save);\
+          }
+#else
+  #define __Pyx_RefNannySetupContext(name, acquire_gil)\
+          __pyx_refnanny = __Pyx_RefNanny->SetupContext((name), (__LINE__), (__FILE__))
+  #define __Pyx_RefNannyFinishContextNogil() __Pyx_RefNannyFinishContext()
+#endif
+  #define __Pyx_RefNannyFinishContext()\
+          __Pyx_RefNanny->FinishContext(&__pyx_refnanny)
+  #define __Pyx_INCREF(r) __Pyx_RefNanny->INCREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+  #define __Pyx_DECREF(r) __Pyx_RefNanny->DECREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+  #define __Pyx_GOTREF(r) __Pyx_RefNanny->GOTREF(__pyx_refnanny, (PyObject *)(r), (__LINE__))
+  #define
__Pyx_GIVEREF(r) __Pyx_RefNanny->GIVEREF(__pyx_refnanny, (PyObject *)(r), (__LINE__)) + #define __Pyx_XINCREF(r) do { if((r) == NULL); else {__Pyx_INCREF(r); }} while(0) + #define __Pyx_XDECREF(r) do { if((r) == NULL); else {__Pyx_DECREF(r); }} while(0) + #define __Pyx_XGOTREF(r) do { if((r) == NULL); else {__Pyx_GOTREF(r); }} while(0) + #define __Pyx_XGIVEREF(r) do { if((r) == NULL); else {__Pyx_GIVEREF(r);}} while(0) +#else + #define __Pyx_RefNannyDeclarations + #define __Pyx_RefNannySetupContext(name, acquire_gil) + #define __Pyx_RefNannyFinishContextNogil() + #define __Pyx_RefNannyFinishContext() + #define __Pyx_INCREF(r) Py_INCREF(r) + #define __Pyx_DECREF(r) Py_DECREF(r) + #define __Pyx_GOTREF(r) + #define __Pyx_GIVEREF(r) + #define __Pyx_XINCREF(r) Py_XINCREF(r) + #define __Pyx_XDECREF(r) Py_XDECREF(r) + #define __Pyx_XGOTREF(r) + #define __Pyx_XGIVEREF(r) +#endif +#define __Pyx_Py_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; Py_XDECREF(tmp);\ + } while (0) +#define __Pyx_XDECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_XDECREF(tmp);\ + } while (0) +#define __Pyx_DECREF_SET(r, v) do {\ + PyObject *tmp = (PyObject *) r;\ + r = v; __Pyx_DECREF(tmp);\ + } while (0) +#define __Pyx_CLEAR(r) do { PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);} while(0) +#define __Pyx_XCLEAR(r) do { if((r) != NULL) {PyObject* tmp = ((PyObject*)(r)); r = NULL; __Pyx_DECREF(tmp);}} while(0) + +/* PyErrExceptionMatches.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyErr_ExceptionMatches(err) __Pyx_PyErr_ExceptionMatchesInState(__pyx_tstate, err) +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err); +#else +#define __Pyx_PyErr_ExceptionMatches(err) PyErr_ExceptionMatches(err) +#endif + +/* PyThreadStateGet.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_PyThreadState_declare PyThreadState *__pyx_tstate; +#define __Pyx_PyThreadState_assign __pyx_tstate = __Pyx_PyThreadState_Current; +#if PY_VERSION_HEX >= 0x030C00A6 +#define __Pyx_PyErr_Occurred() (__pyx_tstate->current_exception != NULL) +#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->current_exception ? 
(PyObject*) Py_TYPE(__pyx_tstate->current_exception) : (PyObject*) NULL)
+#else
+#define __Pyx_PyErr_Occurred() (__pyx_tstate->curexc_type != NULL)
+#define __Pyx_PyErr_CurrentExceptionType() (__pyx_tstate->curexc_type)
+#endif
+#else
+#define __Pyx_PyThreadState_declare
+#define __Pyx_PyThreadState_assign
+#define __Pyx_PyErr_Occurred() (PyErr_Occurred() != NULL)
+#define __Pyx_PyErr_CurrentExceptionType() PyErr_Occurred()
+#endif
+
+/* PyErrFetchRestore.proto */
+#if CYTHON_FAST_THREAD_STATE
+#define __Pyx_PyErr_Clear() __Pyx_ErrRestore(NULL, NULL, NULL)
+#define __Pyx_ErrRestoreWithState(type, value, tb) __Pyx_ErrRestoreInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) __Pyx_ErrFetchInState(PyThreadState_GET(), type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) __Pyx_ErrRestoreInState(__pyx_tstate, type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) __Pyx_ErrFetchInState(__pyx_tstate, type, value, tb)
+static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb);
+static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb);
+#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A6
+#define __Pyx_PyErr_SetNone(exc) (Py_INCREF(exc), __Pyx_ErrRestore((exc), NULL, NULL))
+#else
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#endif
+#else
+#define __Pyx_PyErr_Clear() PyErr_Clear()
+#define __Pyx_PyErr_SetNone(exc) PyErr_SetNone(exc)
+#define __Pyx_ErrRestoreWithState(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchWithState(type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestoreInState(tstate, type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetchInState(tstate, type, value, tb) PyErr_Fetch(type, value, tb)
+#define __Pyx_ErrRestore(type, value, tb) PyErr_Restore(type, value, tb)
+#define __Pyx_ErrFetch(type, value, tb) PyErr_Fetch(type, value, tb)
+#endif
+
+/* PyObjectGetAttrStr.proto */
+#if CYTHON_USE_TYPE_SLOTS
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GetAttrStr(o,n) PyObject_GetAttr(o,n)
+#endif
+
+/* PyObjectGetAttrStrNoError.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name);
+
+/* GetBuiltinName.proto */
+static PyObject *__Pyx_GetBuiltinName(PyObject *name);
+
+/* TupleAndListFromArray.proto */
+#if CYTHON_COMPILING_IN_CPYTHON
+static CYTHON_INLINE PyObject* __Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n);
+static CYTHON_INLINE PyObject* __Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n);
+#endif
+
+/* IncludeStringH.proto */
+#include <string.h>
+
+/* BytesEquals.proto */
+static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* UnicodeEquals.proto */
+static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals);
+
+/* fastcall.proto */
+#if CYTHON_AVOID_BORROWED_REFS
+    #define __Pyx_Arg_VARARGS(args, i) PySequence_GetItem(args, i)
+#elif CYTHON_ASSUME_SAFE_MACROS
+    #define __Pyx_Arg_VARARGS(args, i) PyTuple_GET_ITEM(args, i)
+#else
+    #define __Pyx_Arg_VARARGS(args, i) PyTuple_GetItem(args, i)
+#endif
+#if CYTHON_AVOID_BORROWED_REFS
+    #define __Pyx_Arg_NewRef_VARARGS(arg) __Pyx_NewRef(arg)
+    #define __Pyx_Arg_XDECREF_VARARGS(arg) Py_XDECREF(arg)
+#else
+    #define __Pyx_Arg_NewRef_VARARGS(arg) arg // no-op
+    #define
__Pyx_Arg_XDECREF_VARARGS(arg) // no-op - arg is borrowed +#endif +#define __Pyx_NumKwargs_VARARGS(kwds) PyDict_Size(kwds) +#define __Pyx_KwValues_VARARGS(args, nargs) NULL +#define __Pyx_GetKwValue_VARARGS(kw, kwvalues, s) __Pyx_PyDict_GetItemStrWithError(kw, s) +#define __Pyx_KwargsAsDict_VARARGS(kw, kwvalues) PyDict_Copy(kw) +#if CYTHON_METH_FASTCALL + #define __Pyx_Arg_FASTCALL(args, i) args[i] + #define __Pyx_NumKwargs_FASTCALL(kwds) PyTuple_GET_SIZE(kwds) + #define __Pyx_KwValues_FASTCALL(args, nargs) ((args) + (nargs)) + static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s); + #define __Pyx_KwargsAsDict_FASTCALL(kw, kwvalues) _PyStack_AsDict(kwvalues, kw) + #define __Pyx_Arg_NewRef_FASTCALL(arg) arg // no-op, __Pyx_Arg_FASTCALL is direct and this needs + #define __Pyx_Arg_XDECREF_FASTCALL(arg) // no-op - arg was returned from array +#else + #define __Pyx_Arg_FASTCALL __Pyx_Arg_VARARGS + #define __Pyx_NumKwargs_FASTCALL __Pyx_NumKwargs_VARARGS + #define __Pyx_KwValues_FASTCALL __Pyx_KwValues_VARARGS + #define __Pyx_GetKwValue_FASTCALL __Pyx_GetKwValue_VARARGS + #define __Pyx_KwargsAsDict_FASTCALL __Pyx_KwargsAsDict_VARARGS + #define __Pyx_Arg_NewRef_FASTCALL(arg) __Pyx_Arg_NewRef_VARARGS(arg) + #define __Pyx_Arg_XDECREF_FASTCALL(arg) __Pyx_Arg_XDECREF_VARARGS(arg) +#endif +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS +#define __Pyx_ArgsSlice_VARARGS(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_VARARGS(args, start), stop - start) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) __Pyx_PyTuple_FromArray(&__Pyx_Arg_FASTCALL(args, start), stop - start) +#else +#define __Pyx_ArgsSlice_VARARGS(args, start, stop) PyTuple_GetSlice(args, start, stop) +#define __Pyx_ArgsSlice_FASTCALL(args, start, stop) PyTuple_GetSlice(args, start, stop) +#endif + +/* RaiseArgTupleInvalid.proto */ +static void __Pyx_RaiseArgtupleInvalid(const char* func_name, int exact, + Py_ssize_t num_min, Py_ssize_t num_max, Py_ssize_t num_found); + +/* RaiseDoubleKeywords.proto */ +static void __Pyx_RaiseDoubleKeywordsError(const char* func_name, PyObject* kw_name); + +/* ParseKeywords.proto */ +static int __Pyx_ParseOptionalKeywords(PyObject *kwds, PyObject *const *kwvalues, + PyObject **argnames[], + PyObject *kwds2, PyObject *values[], Py_ssize_t num_pos_args, + const char* function_name); + +/* ArgTypeTest.proto */ +#define __Pyx_ArgTypeTest(obj, type, none_allowed, name, exact)\ + ((likely(__Pyx_IS_TYPE(obj, type) | (none_allowed && (obj == Py_None)))) ? 
1 :\ + __Pyx__ArgTypeTest(obj, type, name, exact)) +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact); + +/* RaiseException.proto */ +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause); + +/* PyFunctionFastCall.proto */ +#if CYTHON_FAST_PYCALL +#if !CYTHON_VECTORCALL +#define __Pyx_PyFunction_FastCall(func, args, nargs)\ + __Pyx_PyFunction_FastCallDict((func), (args), (nargs), NULL) +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs); +#endif +#define __Pyx_BUILD_ASSERT_EXPR(cond)\ + (sizeof(char [1 - 2*!(cond)]) - 1) +#ifndef Py_MEMBER_SIZE +#define Py_MEMBER_SIZE(type, member) sizeof(((type *)0)->member) +#endif +#if !CYTHON_VECTORCALL +#if PY_VERSION_HEX >= 0x03080000 + #include "frameobject.h" +#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif + #define __Pxy_PyFrame_Initialize_Offsets() + #define __Pyx_PyFrame_GetLocalsplus(frame) ((frame)->f_localsplus) +#else + static size_t __pyx_pyframe_localsplus_offset = 0; + #include "frameobject.h" + #define __Pxy_PyFrame_Initialize_Offsets()\ + ((void)__Pyx_BUILD_ASSERT_EXPR(sizeof(PyFrameObject) == offsetof(PyFrameObject, f_localsplus) + Py_MEMBER_SIZE(PyFrameObject, f_localsplus)),\ + (void)(__pyx_pyframe_localsplus_offset = ((size_t)PyFrame_Type.tp_basicsize) - Py_MEMBER_SIZE(PyFrameObject, f_localsplus))) + #define __Pyx_PyFrame_GetLocalsplus(frame)\ + (assert(__pyx_pyframe_localsplus_offset), (PyObject **)(((char *)(frame)) + __pyx_pyframe_localsplus_offset)) +#endif +#endif +#endif + +/* PyObjectCall.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw); +#else +#define __Pyx_PyObject_Call(func, arg, kw) PyObject_Call(func, arg, kw) +#endif + +/* PyObjectCallMethO.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg); +#endif + +/* PyObjectFastCall.proto */ +#define __Pyx_PyObject_FastCall(func, args, nargs) __Pyx_PyObject_FastCallDict(func, args, (size_t)(nargs), NULL) +static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs); + +/* RaiseUnexpectedTypeError.proto */ +static int __Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj); + +/* GCCDiagnostics.proto */ +#if !defined(__INTEL_COMPILER) && defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 6)) +#define __Pyx_HAS_GCC_DIAGNOSTIC +#endif + +/* BuildPyUnicode.proto */ +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char); + +/* CIntToPyUnicode.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char); + +/* CIntToPyUnicode.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char, char format_char); + +/* JoinPyUnicode.proto */ +static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, + Py_UCS4 max_char); + +/* StrEquals.proto */ +#if PY_MAJOR_VERSION >= 3 +#define __Pyx_PyString_Equals __Pyx_PyUnicode_Equals +#else +#define __Pyx_PyString_Equals __Pyx_PyBytes_Equals +#endif + +/* 
PyObjectFormatSimple.proto */ +#if CYTHON_COMPILING_IN_PYPY + #define __Pyx_PyObject_FormatSimple(s, f) (\ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ + PyObject_Format(s, f)) +#elif PY_MAJOR_VERSION < 3 + #define __Pyx_PyObject_FormatSimple(s, f) (\ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ + likely(PyString_CheckExact(s)) ? PyUnicode_FromEncodedObject(s, NULL, "strict") :\ + PyObject_Format(s, f)) +#elif CYTHON_USE_TYPE_SLOTS + #define __Pyx_PyObject_FormatSimple(s, f) (\ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ + likely(PyLong_CheckExact(s)) ? PyLong_Type.tp_repr(s) :\ + likely(PyFloat_CheckExact(s)) ? PyFloat_Type.tp_repr(s) :\ + PyObject_Format(s, f)) +#else + #define __Pyx_PyObject_FormatSimple(s, f) (\ + likely(PyUnicode_CheckExact(s)) ? (Py_INCREF(s), s) :\ + PyObject_Format(s, f)) +#endif + +CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *); /*proto*/ +/* GetAttr.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *, PyObject *); + +/* GetItemInt.proto */ +#define __Pyx_GetItemInt(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Fast(o, (Py_ssize_t)i, is_list, wraparound, boundscheck) :\ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL) :\ + __Pyx_GetItemInt_Generic(o, to_py_func(i)))) +#define __Pyx_GetItemInt_List(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_List_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "list index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +#define __Pyx_GetItemInt_Tuple(o, i, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_GetItemInt_Tuple_Fast(o, (Py_ssize_t)i, wraparound, boundscheck) :\ + (PyErr_SetString(PyExc_IndexError, "tuple index out of range"), (PyObject*)NULL)) +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + int wraparound, int boundscheck); +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j); +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, + int is_list, int wraparound, int boundscheck); + +/* PyObjectCallOneArg.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg); + +/* ObjectGetItem.proto */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key); +#else +#define __Pyx_PyObject_GetItem(obj, key) PyObject_GetItem(obj, key) +#endif + +/* KeywordStringCheck.proto */ +static int __Pyx_CheckKeywordStrings(PyObject *kw, const char* function_name, int kw_allowed); + +/* DivInt[Py_ssize_t].proto */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t, Py_ssize_t); + +/* UnaryNegOverflows.proto */ +#define __Pyx_UNARY_NEG_WOULD_OVERFLOW(x)\ + (((x) < 0) & ((unsigned long)(x) == 0-(unsigned long)(x))) + +/* GetAttr3.proto */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *, PyObject *, PyObject *); + +/* PyDictVersioning.proto */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +#define 
__PYX_DICT_VERSION_INIT ((PY_UINT64_T) -1) +#define __PYX_GET_DICT_VERSION(dict) (((PyDictObject*)(dict))->ma_version_tag) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var)\ + (version_var) = __PYX_GET_DICT_VERSION(dict);\ + (cache_var) = (value); +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + if (likely(__PYX_GET_DICT_VERSION(DICT) == __pyx_dict_version)) {\ + (VAR) = __pyx_dict_cached_value;\ + } else {\ + (VAR) = __pyx_dict_cached_value = (LOOKUP);\ + __pyx_dict_version = __PYX_GET_DICT_VERSION(DICT);\ + }\ +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj); +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj); +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version); +#else +#define __PYX_GET_DICT_VERSION(dict) (0) +#define __PYX_UPDATE_DICT_CACHE(dict, value, cache_var, version_var) +#define __PYX_PY_DICT_LOOKUP_IF_MODIFIED(VAR, DICT, LOOKUP) (VAR) = (LOOKUP); +#endif + +/* GetModuleGlobalName.proto */ +#if CYTHON_USE_DICT_VERSIONS +#define __Pyx_GetModuleGlobalName(var, name) do {\ + static PY_UINT64_T __pyx_dict_version = 0;\ + static PyObject *__pyx_dict_cached_value = NULL;\ + (var) = (likely(__pyx_dict_version == __PYX_GET_DICT_VERSION(__pyx_d))) ?\ + (likely(__pyx_dict_cached_value) ? __Pyx_NewRef(__pyx_dict_cached_value) : __Pyx_GetBuiltinName(name)) :\ + __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} while(0) +#define __Pyx_GetModuleGlobalNameUncached(var, name) do {\ + PY_UINT64_T __pyx_dict_version;\ + PyObject *__pyx_dict_cached_value;\ + (var) = __Pyx__GetModuleGlobalName(name, &__pyx_dict_version, &__pyx_dict_cached_value);\ +} while(0) +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value); +#else +#define __Pyx_GetModuleGlobalName(var, name) (var) = __Pyx__GetModuleGlobalName(name) +#define __Pyx_GetModuleGlobalNameUncached(var, name) (var) = __Pyx__GetModuleGlobalName(name) +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name); +#endif + +/* AssertionsEnabled.proto */ +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX < 0x02070600 && !defined(Py_OptimizeFlag) + #define __Pyx_init_assertions_enabled() (0) + #define __pyx_assertions_enabled() (1) +#elif CYTHON_COMPILING_IN_LIMITED_API || (CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030C0000) + static int __pyx_assertions_enabled_flag; + #define __pyx_assertions_enabled() (__pyx_assertions_enabled_flag) + static int __Pyx_init_assertions_enabled(void) { + PyObject *builtins, *debug, *debug_str; + int flag; + builtins = PyEval_GetBuiltins(); + if (!builtins) goto bad; + debug_str = PyUnicode_FromStringAndSize("__debug__", 9); + if (!debug_str) goto bad; + debug = PyObject_GetItem(builtins, debug_str); + Py_DECREF(debug_str); + if (!debug) goto bad; + flag = PyObject_IsTrue(debug); + Py_DECREF(debug); + if (flag == -1) goto bad; + __pyx_assertions_enabled_flag = flag; + return 0; + bad: + __pyx_assertions_enabled_flag = 1; + return -1; + } +#else + #define __Pyx_init_assertions_enabled() (0) + #define __pyx_assertions_enabled() (!Py_OptimizeFlag) +#endif + +/* RaiseTooManyValuesToUnpack.proto */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected); + +/* RaiseNeedMoreValuesToUnpack.proto */ +static CYTHON_INLINE 
void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index); + +/* RaiseNoneIterError.proto */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void); + +/* ExtTypeTest.proto */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type); + +/* GetTopmostException.proto */ +#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE +static _PyErr_StackItem * __Pyx_PyErr_GetTopmostException(PyThreadState *tstate); +#endif + +/* SaveResetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSave(type, value, tb) __Pyx__ExceptionSave(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#define __Pyx_ExceptionReset(type, value, tb) __Pyx__ExceptionReset(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb); +#else +#define __Pyx_ExceptionSave(type, value, tb) PyErr_GetExcInfo(type, value, tb) +#define __Pyx_ExceptionReset(type, value, tb) PyErr_SetExcInfo(type, value, tb) +#endif + +/* GetException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_GetException(type, value, tb) __Pyx__GetException(__pyx_tstate, type, value, tb) +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* SwapException.proto */ +#if CYTHON_FAST_THREAD_STATE +#define __Pyx_ExceptionSwap(type, value, tb) __Pyx__ExceptionSwap(__pyx_tstate, type, value, tb) +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb); +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb); +#endif + +/* Import.proto */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level); + +/* ImportDottedModule.proto */ +static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple); +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple); +#endif + +/* ssize_strlen.proto */ +static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s); + +/* FastTypeChecks.proto */ +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_TypeCheck(obj, type) __Pyx_IsSubtype(Py_TYPE(obj), (PyTypeObject *)type) +#define __Pyx_TypeCheck2(obj, type1, type2) __Pyx_IsAnySubtype2(Py_TYPE(obj), (PyTypeObject *)type1, (PyTypeObject *)type2) +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject *type); +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *type1, PyObject *type2); +#else +#define __Pyx_TypeCheck(obj, type) PyObject_TypeCheck(obj, (PyTypeObject *)type) +#define __Pyx_TypeCheck2(obj, type1, type2) (PyObject_TypeCheck(obj, (PyTypeObject *)type1) || PyObject_TypeCheck(obj, (PyTypeObject *)type2)) +#define __Pyx_PyErr_GivenExceptionMatches(err, type) PyErr_GivenExceptionMatches(err, type) +#define __Pyx_PyErr_GivenExceptionMatches2(err, type1, type2) (PyErr_GivenExceptionMatches(err, type1) || PyErr_GivenExceptionMatches(err, type2)) +#endif +#define __Pyx_PyErr_ExceptionMatches2(err1, err2) 
__Pyx_PyErr_GivenExceptionMatches2(__Pyx_PyErr_CurrentExceptionType(), err1, err2) +#define __Pyx_PyException_Check(obj) __Pyx_TypeCheck(obj, PyExc_Exception) + +CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +/* ListCompAppend.proto */ +#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS +static CYTHON_INLINE int __Pyx_ListComp_Append(PyObject* list, PyObject* x) { + PyListObject* L = (PyListObject*) list; + Py_ssize_t len = Py_SIZE(list); + if (likely(L->allocated > len)) { + Py_INCREF(x); + PyList_SET_ITEM(list, len, x); + __Pyx_SET_SIZE(list, len + 1); + return 0; + } + return PyList_Append(list, x); +} +#else +#define __Pyx_ListComp_Append(L,x) PyList_Append(L,x) +#endif + +/* PySequenceMultiply.proto */ +#define __Pyx_PySequence_Multiply_Left(mul, seq) __Pyx_PySequence_Multiply(seq, mul) +static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul); + +/* SetItemInt.proto */ +#define __Pyx_SetItemInt(o, i, v, type, is_signed, to_py_func, is_list, wraparound, boundscheck)\ + (__Pyx_fits_Py_ssize_t(i, type, is_signed) ?\ + __Pyx_SetItemInt_Fast(o, (Py_ssize_t)i, v, is_list, wraparound, boundscheck) :\ + (is_list ? (PyErr_SetString(PyExc_IndexError, "list assignment index out of range"), -1) :\ + __Pyx_SetItemInt_Generic(o, to_py_func(i), v))) +static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v); +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, + int is_list, int wraparound, int boundscheck); + +/* RaiseUnboundLocalError.proto */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname); + +/* DivInt[long].proto */ +static CYTHON_INLINE long __Pyx_div_long(long, long); + +/* PySequenceContains.proto */ +static CYTHON_INLINE int __Pyx_PySequence_ContainsTF(PyObject* item, PyObject* seq, int eq) { + int result = PySequence_Contains(seq, item); + return unlikely(result < 0) ? result : (result == (eq == Py_EQ)); +} + +/* ImportFrom.proto */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name); + +/* HasAttr.proto */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *, PyObject *); + +/* PyFloatBinop.proto */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyFloat_TrueDivideCObj(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check); +#else +#define __Pyx_PyFloat_TrueDivideCObj(op1, op2, floatval, inplace, zerodivision_check)\ + (inplace ? PyNumber_InPlaceTrueDivide(op1, op2) : PyNumber_TrueDivide(op1, op2)) +#endif + +/* UnpackUnboundCMethod.proto */ +typedef struct { + PyObject *type; + PyObject **method_name; + PyCFunction func; + PyObject *method; + int flag; +} __Pyx_CachedCFunction; + +/* CallUnboundCMethod0.proto */ +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self); +#if CYTHON_COMPILING_IN_CPYTHON +#define __Pyx_CallUnboundCMethod0(cfunc, self)\ + (likely((cfunc)->func) ?\ + (likely((cfunc)->flag == METH_NOARGS) ? 
(*((cfunc)->func))(self, NULL) :\
+        (PY_VERSION_HEX >= 0x030600B1 && likely((cfunc)->flag == METH_FASTCALL) ?\
+            (PY_VERSION_HEX >= 0x030700A0 ?\
+                (*(__Pyx_PyCFunctionFast)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0) :\
+                (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0, NULL)) :\
+        (PY_VERSION_HEX >= 0x030700A0 && (cfunc)->flag == (METH_FASTCALL | METH_KEYWORDS) ?\
+            (*(__Pyx_PyCFunctionFastWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, &__pyx_empty_tuple, 0, NULL) :\
+        (likely((cfunc)->flag == (METH_VARARGS | METH_KEYWORDS)) ? ((*(PyCFunctionWithKeywords)(void*)(PyCFunction)(cfunc)->func)(self, __pyx_empty_tuple, NULL)) :\
+        ((cfunc)->flag == METH_VARARGS ? (*((cfunc)->func))(self, __pyx_empty_tuple) :\
+        __Pyx__CallUnboundCMethod0(cfunc, self)))))) :\
+    __Pyx__CallUnboundCMethod0(cfunc, self))
+#else
+#define __Pyx_CallUnboundCMethod0(cfunc, self) __Pyx__CallUnboundCMethod0(cfunc, self)
+#endif
+
+/* ListAppend.proto */
+#if CYTHON_USE_PYLIST_INTERNALS && CYTHON_ASSUME_SAFE_MACROS
+static CYTHON_INLINE int __Pyx_PyList_Append(PyObject* list, PyObject* x) {
+    PyListObject* L = (PyListObject*) list;
+    Py_ssize_t len = Py_SIZE(list);
+    if (likely(L->allocated > len) & likely(len > (L->allocated >> 1))) {
+        Py_INCREF(x);
+        PyList_SET_ITEM(list, len, x);
+        __Pyx_SET_SIZE(list, len + 1);
+        return 0;
+    }
+    return PyList_Append(list, x);
+}
+#else
+#define __Pyx_PyList_Append(L,x) PyList_Append(L,x)
+#endif
+
+/* PyObject_GenericGetAttrNoDict.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttrNoDict PyObject_GenericGetAttr
+#endif
+
+/* PyObject_GenericGetAttr.proto */
+#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000
+static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name);
+#else
+#define __Pyx_PyObject_GenericGetAttr PyObject_GenericGetAttr
+#endif
+
+/* IncludeStructmemberH.proto */
+#include <structmember.h>
+
+/* FixUpExtensionType.proto */
+#if CYTHON_USE_TYPE_SPECS
+static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type);
+#endif
+
+/* PyObjectCallNoArg.proto */
+static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func);
+
+/* PyObjectGetMethod.proto */
+static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method);
+
+/* PyObjectCallMethod0.proto */
+static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name);
+
+/* ValidateBasesTuple.proto */
+#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS
+static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases);
+#endif
+
+/* PyType_Ready.proto */
+CYTHON_UNUSED static int __Pyx_PyType_Ready(PyTypeObject *t);
+
+/* SetVTable.proto */
+static int __Pyx_SetVtable(PyTypeObject* typeptr , void* vtable);
+
+/* GetVTable.proto */
+static void* __Pyx_GetVtable(PyTypeObject *type);
+
+/* MergeVTables.proto */
+#if !CYTHON_COMPILING_IN_LIMITED_API
+static int __Pyx_MergeVtables(PyTypeObject *type);
+#endif
+
+/* SetupReduce.proto */
+#if !CYTHON_COMPILING_IN_LIMITED_API
+static int __Pyx_setup_reduce(PyObject* type_obj);
+#endif
+
+/* TypeImport.proto */
+#ifndef __PYX_HAVE_RT_ImportType_proto_3_0_2
+#define __PYX_HAVE_RT_ImportType_proto_3_0_2
+#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+#include <stdalign.h>
+#endif
+#if (defined (__STDC_VERSION__) && __STDC_VERSION__ >= 201112L) || __cplusplus >= 201103L
+#define __PYX_GET_STRUCT_ALIGNMENT_3_0_2(s) alignof(s)
+#else
+#define __PYX_GET_STRUCT_ALIGNMENT_3_0_2(s) sizeof(void*)
+#endif
+enum __Pyx_ImportType_CheckSize_3_0_2 {
+    __Pyx_ImportType_CheckSize_Error_3_0_2 = 0,
+    __Pyx_ImportType_CheckSize_Warn_3_0_2 = 1,
+    __Pyx_ImportType_CheckSize_Ignore_3_0_2 = 2
+};
+static PyTypeObject *__Pyx_ImportType_3_0_2(PyObject* module, const char *module_name, const char *class_name, size_t size, size_t alignment, enum __Pyx_ImportType_CheckSize_3_0_2 check_size);
+#endif
+
+/* FetchSharedCythonModule.proto */
+static PyObject *__Pyx_FetchSharedCythonABIModule(void);
+
+/* FetchCommonType.proto */
+#if !CYTHON_USE_TYPE_SPECS
+static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type);
+#else
+static PyTypeObject* __Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases);
+#endif
+
+/* PyMethodNew.proto */
+#if CYTHON_COMPILING_IN_LIMITED_API
+static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) {
+    PyObject *typesModule=NULL, *methodType=NULL, *result=NULL;
+    CYTHON_UNUSED_VAR(typ);
+    if (!self)
+        return __Pyx_NewRef(func);
+    typesModule = PyImport_ImportModule("types");
+    if (!typesModule) return NULL;
+    methodType = PyObject_GetAttrString(typesModule, "MethodType");
+    Py_DECREF(typesModule);
+    if (!methodType) return NULL;
+    result = PyObject_CallFunctionObjArgs(methodType, func, self, NULL);
+    Py_DECREF(methodType);
+    return result;
+}
+#elif PY_MAJOR_VERSION >= 3
+static PyObject *__Pyx_PyMethod_New(PyObject *func, PyObject *self, PyObject *typ) {
+    CYTHON_UNUSED_VAR(typ);
+    if (!self)
+        return __Pyx_NewRef(func);
+    return PyMethod_New(func, self);
+}
+#else
+    #define __Pyx_PyMethod_New PyMethod_New
+#endif
+
+/* PyVectorcallFastCallDict.proto */
+#if CYTHON_METH_FASTCALL
+static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw);
+#endif
+
+/* CythonFunctionShared.proto */
+#define __Pyx_CyFunction_USED
+#define __Pyx_CYFUNCTION_STATICMETHOD 0x01
+#define __Pyx_CYFUNCTION_CLASSMETHOD 0x02
+#define __Pyx_CYFUNCTION_CCLASS 0x04
+#define __Pyx_CYFUNCTION_COROUTINE 0x08
+#define __Pyx_CyFunction_GetClosure(f)\
+    (((__pyx_CyFunctionObject *) (f))->func_closure)
+#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API
+    #define __Pyx_CyFunction_GetClassObj(f)\
+        (((__pyx_CyFunctionObject *) (f))->func_classobj)
+#else
+    #define __Pyx_CyFunction_GetClassObj(f)\
+        ((PyObject*) ((PyCMethodObject *) (f))->mm_class)
+#endif
+#define __Pyx_CyFunction_SetClassObj(f, classobj)\
+    __Pyx__CyFunction_SetClassObj((__pyx_CyFunctionObject *) (f), (classobj))
+#define __Pyx_CyFunction_Defaults(type, f)\
+    ((type *)(((__pyx_CyFunctionObject *) (f))->defaults))
+#define __Pyx_CyFunction_SetDefaultsGetter(f, g)\
+    ((__pyx_CyFunctionObject *) (f))->defaults_getter = (g)
+typedef struct {
+#if CYTHON_COMPILING_IN_LIMITED_API
+    PyObject_HEAD
+    PyObject *func;
+#elif PY_VERSION_HEX < 0x030900B1
+    PyCFunctionObject func;
+#else
+    PyCMethodObject func;
+#endif
+#if CYTHON_BACKPORT_VECTORCALL
+    __pyx_vectorcallfunc func_vectorcall;
+#endif
+#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API
+    PyObject *func_weakreflist;
+#endif
+    PyObject *func_dict;
+    PyObject *func_name;
+    PyObject *func_qualname;
+    PyObject *func_doc;
+    PyObject
*func_globals; + PyObject *func_code; + PyObject *func_closure; +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + PyObject *func_classobj; +#endif + void *defaults; + int defaults_pyobjects; + size_t defaults_size; // used by FusedFunction for copying defaults + int flags; + PyObject *defaults_tuple; + PyObject *defaults_kwdict; + PyObject *(*defaults_getter)(PyObject *); + PyObject *func_annotations; + PyObject *func_is_coroutine; +} __pyx_CyFunctionObject; +#define __Pyx_CyFunction_Check(obj) __Pyx_TypeCheck(obj, __pyx_CyFunctionType) +#define __Pyx_IsCyOrPyCFunction(obj) __Pyx_TypeCheck2(obj, __pyx_CyFunctionType, &PyCFunction_Type) +#define __Pyx_CyFunction_CheckExact(obj) __Pyx_IS_TYPE(obj, __pyx_CyFunctionType) +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject* op, PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); +static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj); +static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *m, + size_t size, + int pyobjects); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *m, + PyObject *tuple); +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *m, + PyObject *dict); +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *m, + PyObject *dict); +static int __pyx_CyFunction_init(PyObject *module); +#if CYTHON_METH_FASTCALL +static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames); +#if CYTHON_BACKPORT_VECTORCALL +#define __Pyx_CyFunction_func_vectorcall(f) (((__pyx_CyFunctionObject*)f)->func_vectorcall) +#else +#define __Pyx_CyFunction_func_vectorcall(f) (((PyCFunctionObject*)f)->vectorcall) +#endif +#endif + +/* CythonFunction.proto */ +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, + int flags, PyObject* qualname, + PyObject *closure, + PyObject *module, PyObject *globals, + PyObject* code); + +/* CLineInTraceback.proto */ +#ifdef CYTHON_CLINE_IN_TRACEBACK +#define __Pyx_CLineForTraceback(tstate, c_line) (((CYTHON_CLINE_IN_TRACEBACK)) ? 
c_line : 0) +#else +static int __Pyx_CLineForTraceback(PyThreadState *tstate, int c_line); +#endif + +/* CodeObjectCache.proto */ +#if !CYTHON_COMPILING_IN_LIMITED_API +typedef struct { + PyCodeObject* code_object; + int code_line; +} __Pyx_CodeObjectCacheEntry; +struct __Pyx_CodeObjectCache { + int count; + int max_count; + __Pyx_CodeObjectCacheEntry* entries; +}; +static struct __Pyx_CodeObjectCache __pyx_code_cache = {0,0,NULL}; +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line); +static PyCodeObject *__pyx_find_code_object(int code_line); +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object); +#endif + +/* AddTraceback.proto */ +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename); + +#if PY_MAJOR_VERSION < 3 + static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags); + static void __Pyx_ReleaseBuffer(Py_buffer *view); +#else + #define __Pyx_GetBuffer PyObject_GetBuffer + #define __Pyx_ReleaseBuffer PyBuffer_Release +#endif + + +/* BufferStructDeclare.proto */ +typedef struct { + Py_ssize_t shape, strides, suboffsets; +} __Pyx_Buf_DimInfo; +typedef struct { + size_t refcount; + Py_buffer pybuffer; +} __Pyx_Buffer; +typedef struct { + __Pyx_Buffer *rcbuffer; + char *data; + __Pyx_Buf_DimInfo diminfo[8]; +} __Pyx_LocalBuf_ND; + +/* MemviewSliceIsContig.proto */ +static int __pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim); + +/* OverlappingSlices.proto */ +static int __pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize); + +/* IsLittleEndian.proto */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void); + +/* BufferFormatCheck.proto */ +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts); +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type); + +/* TypeInfoCompare.proto */ +static int __pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b); + +/* MemviewSliceValidateAndInit.proto */ +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_float(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_float(PyObject *, int writable_flag); + +/* ObjectToMemviewSlice.proto */ +static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *, int writable_flag); + +/* MemviewDtypeToObject.proto */ +static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp); +static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj); + +/* RealImag.proto */ +#if CYTHON_CCOMPLEX + #ifdef __cplusplus + #define __Pyx_CREAL(z) ((z).real()) + #define __Pyx_CIMAG(z) ((z).imag()) + #else + #define __Pyx_CREAL(z) (__real__(z)) + #define __Pyx_CIMAG(z) (__imag__(z)) + #endif +#else + #define __Pyx_CREAL(z) ((z).real) + #define __Pyx_CIMAG(z) ((z).imag) +#endif +#if defined(__cplusplus) && 
CYTHON_CCOMPLEX\ + && (defined(_WIN32) || defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 5 || __GNUC__ == 4 && __GNUC_MINOR__ >= 4 )) || __cplusplus >= 201103) + #define __Pyx_SET_CREAL(z,x) ((z).real(x)) + #define __Pyx_SET_CIMAG(z,y) ((z).imag(y)) +#else + #define __Pyx_SET_CREAL(z,x) __Pyx_CREAL(z) = (x) + #define __Pyx_SET_CIMAG(z,y) __Pyx_CIMAG(z) = (y) +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #define __Pyx_c_eq_float(a, b) ((a)==(b)) + #define __Pyx_c_sum_float(a, b) ((a)+(b)) + #define __Pyx_c_diff_float(a, b) ((a)-(b)) + #define __Pyx_c_prod_float(a, b) ((a)*(b)) + #define __Pyx_c_quot_float(a, b) ((a)/(b)) + #define __Pyx_c_neg_float(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_float(z) ((z)==(float)0) + #define __Pyx_c_conj_float(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_float(z) (::std::abs(z)) + #define __Pyx_c_pow_float(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_float(z) ((z)==0) + #define __Pyx_c_conj_float(z) (conjf(z)) + #if 1 + #define __Pyx_c_abs_float(z) (cabsf(z)) + #define __Pyx_c_pow_float(a, b) (cpowf(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex, __pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex); + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex); + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex, __pyx_t_float_complex); + #endif +#endif + +/* Arithmetic.proto */ +#if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #define __Pyx_c_eq_double(a, b) ((a)==(b)) + #define __Pyx_c_sum_double(a, b) ((a)+(b)) + #define __Pyx_c_diff_double(a, b) ((a)-(b)) + #define __Pyx_c_prod_double(a, b) ((a)*(b)) + #define __Pyx_c_quot_double(a, b) ((a)/(b)) + #define __Pyx_c_neg_double(a) (-(a)) + #ifdef __cplusplus + #define __Pyx_c_is_zero_double(z) ((z)==(double)0) + #define __Pyx_c_conj_double(z) (::std::conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (::std::abs(z)) + #define __Pyx_c_pow_double(a, b) (::std::pow(a, b)) + #endif + #else + #define __Pyx_c_is_zero_double(z) ((z)==0) + #define __Pyx_c_conj_double(z) (conj(z)) + #if 1 + #define __Pyx_c_abs_double(z) (cabs(z)) + #define __Pyx_c_pow_double(a, b) (cpow(a, b)) + #endif + #endif +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex, __pyx_t_double_complex); + static CYTHON_INLINE 
__pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex); + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex); + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex); + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex, __pyx_t_double_complex); + #endif +#endif + +/* MemviewSliceCopyTemplate.proto */ +static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object); + +/* MemviewSliceInit.proto */ +#define __Pyx_BUF_MAX_NDIMS %(BUF_MAX_NDIMS)d +#define __Pyx_MEMVIEW_DIRECT 1 +#define __Pyx_MEMVIEW_PTR 2 +#define __Pyx_MEMVIEW_FULL 4 +#define __Pyx_MEMVIEW_CONTIG 8 +#define __Pyx_MEMVIEW_STRIDED 16 +#define __Pyx_MEMVIEW_FOLLOW 32 +#define __Pyx_IS_C_CONTIG 1 +#define __Pyx_IS_F_CONTIG 2 +static int __Pyx_init_memviewslice( + struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference); +static CYTHON_INLINE int __pyx_add_acquisition_count_locked( + __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock); +static CYTHON_INLINE int __pyx_sub_acquisition_count_locked( + __pyx_atomic_int_type *acquisition_count, PyThread_type_lock lock); +#define __pyx_get_slice_count_pointer(memview) (&memview->acquisition_count) +#define __PYX_INC_MEMVIEW(slice, have_gil) __Pyx_INC_MEMVIEW(slice, have_gil, __LINE__) +#define __PYX_XCLEAR_MEMVIEW(slice, have_gil) __Pyx_XCLEAR_MEMVIEW(slice, have_gil, __LINE__) +static CYTHON_INLINE void __Pyx_INC_MEMVIEW(__Pyx_memviewslice *, int, int); +static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW(__Pyx_memviewslice *, int, int); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value); + +/* CIntToPy.proto */ +static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value); + +/* CIntFromPy.proto */ +static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *); + +/* CIntFromPy.proto */ +static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *); + +/* FormatTypeName.proto */ +#if CYTHON_COMPILING_IN_LIMITED_API +typedef PyObject *__Pyx_TypeName; +#define __Pyx_FMT_TYPENAME "%U" +static __Pyx_TypeName __Pyx_PyType_GetName(PyTypeObject* tp); +#define __Pyx_DECREF_TypeName(obj) Py_XDECREF(obj) +#else +typedef const char *__Pyx_TypeName; +#define __Pyx_FMT_TYPENAME "%.200s" +#define __Pyx_PyType_GetName(tp) ((tp)->tp_name) +#define __Pyx_DECREF_TypeName(obj) +#endif + +/* CheckBinaryVersion.proto */ +static int __Pyx_check_binary_version(void); + +/* InitStrings.proto */ +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t); + +/* #### Code section: module_declarations ### */ +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self); /* proto*/ +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto*/ +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src); /* proto*/ +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct 
__pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self); /* proto*/ +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp); /* proto*/ +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value); /* proto*/ +static PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto*/ +static CYTHON_INLINE PyObject *__pyx_f_5numpy_7ndarray_4base_base(PyArrayObject *__pyx_v_self); /* proto*/ +static CYTHON_INLINE PyArray_Descr *__pyx_f_5numpy_7ndarray_5descr_descr(PyArrayObject *__pyx_v_self); /* proto*/ +static CYTHON_INLINE int __pyx_f_5numpy_7ndarray_4ndim_ndim(PyArrayObject *__pyx_v_self); /* proto*/ +static CYTHON_INLINE npy_intp *__pyx_f_5numpy_7ndarray_5shape_shape(PyArrayObject *__pyx_v_self); /* proto*/ +static CYTHON_INLINE npy_intp *__pyx_f_5numpy_7ndarray_7strides_strides(PyArrayObject *__pyx_v_self); /* proto*/ +static CYTHON_INLINE npy_intp __pyx_f_5numpy_7ndarray_4size_size(PyArrayObject *__pyx_v_self); /* proto*/ +static CYTHON_INLINE char *__pyx_f_5numpy_7ndarray_4data_data(PyArrayObject *__pyx_v_self); /* proto*/ + +/* Module declarations from "libc.string" */ + +/* Module declarations from "libc.stdio" */ + +/* Module declarations from "__builtin__" */ + +/* Module declarations from "cpython.type" */ + +/* Module declarations from "cpython" */ + +/* Module declarations from "cpython.object" */ + +/* Module declarations from "cpython.ref" */ + +/* Module declarations from "numpy" */ + +/* Module declarations from "numpy" */ +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void); /*proto*/ + +/* Module declarations from "cython.view" */ + +/* Module declarations from "cython.dataclasses" */ + +/* Module declarations from "cython" */ + +/* Module declarations from "box_intersection" */ +static int __pyx_v_16box_intersection_boolean_variable; +static PyObject *__pyx_collections_abc_Sequence = 0; +static PyObject *generic = 0; +static PyObject *strided = 0; +static PyObject *indirect = 0; +static PyObject *contiguous = 0; +static PyObject *indirect_contiguous = 0; +static int __pyx_memoryview_thread_locks_used; +static PyThread_type_lock __pyx_memoryview_thread_locks[8]; +static CYTHON_INLINE int __pyx_f_16box_intersection_inside(PyObject *, PyObject *, PyObject *); /*proto*/ +static void __pyx_f_16box_intersection_copy_points(__Pyx_memviewslice, __Pyx_memviewslice, Py_ssize_t); /*proto*/ +static CYTHON_INLINE Py_ssize_t __pyx_f_16box_intersection_add_point(__Pyx_memviewslice, __Pyx_memviewslice, Py_ssize_t); /*proto*/ +static Py_ssize_t __pyx_f_16box_intersection_computeIntersection_and_add(__Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, __Pyx_memviewslice, Py_ssize_t); /*proto*/ +static int __pyx_array_allocate_buffer(struct __pyx_array_obj *); /*proto*/ +static struct __pyx_array_obj 
*__pyx_array_new(PyObject *, Py_ssize_t, char *, char *, char *); /*proto*/ +static PyObject *__pyx_memoryview_new(PyObject *, int, int, __Pyx_TypeInfo *); /*proto*/ +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *); /*proto*/ +static PyObject *_unellipsify(PyObject *, int); /*proto*/ +static int assert_direct_dimensions(Py_ssize_t *, int); /*proto*/ +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *, PyObject *); /*proto*/ +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int *, Py_ssize_t, Py_ssize_t, Py_ssize_t, int, int, int, int); /*proto*/ +static char *__pyx_pybuffer_index(Py_buffer *, char *, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memslice_transpose(__Pyx_memviewslice *); /*proto*/ +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice, int, PyObject *(*)(char *), int (*)(char *, PyObject *), int); /*proto*/ +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *); /*proto*/ +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *, __Pyx_memviewslice *); /*proto*/ +static Py_ssize_t abs_py_ssize_t(Py_ssize_t); /*proto*/ +static char __pyx_get_best_slice_order(__Pyx_memviewslice *, int); /*proto*/ +static void _copy_strided_to_strided(char *, Py_ssize_t *, char *, Py_ssize_t *, Py_ssize_t *, Py_ssize_t *, int, size_t); /*proto*/ +static void copy_strided_to_strided(__Pyx_memviewslice *, __Pyx_memviewslice *, int, size_t); /*proto*/ +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *, int); /*proto*/ +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *, Py_ssize_t *, Py_ssize_t, int, char); /*proto*/ +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *, __Pyx_memviewslice *, char, int); /*proto*/ +static int __pyx_memoryview_err_extents(int, Py_ssize_t, Py_ssize_t); /*proto*/ +static int __pyx_memoryview_err_dim(PyObject *, PyObject *, int); /*proto*/ +static int __pyx_memoryview_err(PyObject *, PyObject *); /*proto*/ +static int __pyx_memoryview_err_no_memory(void); /*proto*/ +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice, __Pyx_memviewslice, int, int, int); /*proto*/ +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *, int, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_refcount_objects_in_slice(char *, Py_ssize_t *, Py_ssize_t *, int, int); /*proto*/ +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *, int, size_t, void *, int); /*proto*/ +static void __pyx_memoryview__slice_assign_scalar(char *, Py_ssize_t *, Py_ssize_t *, int, size_t, void *); /*proto*/ +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *, PyObject *); /*proto*/ +/* #### Code section: typeinfo ### */ +static __Pyx_TypeInfo __Pyx_TypeInfo_float = { "float", NULL, sizeof(float), { 0 }, 0, 'R', 0, 0 }; +static __Pyx_TypeInfo __Pyx_TypeInfo_int = { "int", NULL, sizeof(int), { 0 }, 0, __PYX_IS_UNSIGNED(int) ? 
'U' : 'I', __PYX_IS_UNSIGNED(int), 0 }; +/* #### Code section: before_global_var ### */ +#define __Pyx_MODULE_NAME "box_intersection" +extern int __pyx_module_is_main_box_intersection; +int __pyx_module_is_main_box_intersection = 0; + +/* Implementation of "box_intersection" */ +/* #### Code section: global_var ### */ +static PyObject *__pyx_builtin_range; +static PyObject *__pyx_builtin___import__; +static PyObject *__pyx_builtin_ValueError; +static PyObject *__pyx_builtin_MemoryError; +static PyObject *__pyx_builtin_enumerate; +static PyObject *__pyx_builtin_TypeError; +static PyObject *__pyx_builtin_AssertionError; +static PyObject *__pyx_builtin_Ellipsis; +static PyObject *__pyx_builtin_id; +static PyObject *__pyx_builtin_IndexError; +static PyObject *__pyx_builtin_ImportError; +/* #### Code section: string_decls ### */ +static const char __pyx_k_[] = ": "; +static const char __pyx_k_B[] = "B"; +static const char __pyx_k_O[] = "O"; +static const char __pyx_k_b[] = "b"; +static const char __pyx_k_c[] = "c"; +static const char __pyx_k_e[] = "e"; +static const char __pyx_k_s[] = "s"; +static const char __pyx_k_x[] = "x"; +static const char __pyx_k_K1[] = "K1"; +static const char __pyx_k_K2[] = "K2"; +static const char __pyx_k__2[] = "."; +static const char __pyx_k__3[] = "*"; +static const char __pyx_k__6[] = "'"; +static const char __pyx_k__7[] = ")"; +static const char __pyx_k_dc[] = "dc"; +static const char __pyx_k_dp[] = "dp"; +static const char __pyx_k_gc[] = "gc"; +static const char __pyx_k_id[] = "id"; +static const char __pyx_k_k1[] = "k1"; +static const char __pyx_k_k2[] = "k2"; +static const char __pyx_k_n1[] = "n1"; +static const char __pyx_k_n2[] = "n2"; +static const char __pyx_k_n3[] = "n3"; +static const char __pyx_k_np[] = "np"; +static const char __pyx_k_xs[] = "xs"; +static const char __pyx_k_ys[] = "ys"; +static const char __pyx_k__30[] = "?"; +static const char __pyx_k_abc[] = "abc"; +static const char __pyx_k_abs[] = "abs"; +static const char __pyx_k_and[] = " and "; +static const char __pyx_k_cp1[] = "cp1"; +static const char __pyx_k_cp2[] = "cp2"; +static const char __pyx_k_dot[] = "dot"; +static const char __pyx_k_got[] = " (got "; +static const char __pyx_k_inc[] = "inc"; +static const char __pyx_k_new[] = "__new__"; +static const char __pyx_k_obj[] = "obj"; +static const char __pyx_k_sys[] = "sys"; +static const char __pyx_k_base[] = "base"; +static const char __pyx_k_cidx[] = "cidx"; +static const char __pyx_k_copy[] = "copy"; +static const char __pyx_k_dict[] = "__dict__"; +static const char __pyx_k_iidx[] = "iidx"; +static const char __pyx_k_lenc[] = "lenc"; +static const char __pyx_k_main[] = "__main__"; +static const char __pyx_k_mode[] = "mode"; +static const char __pyx_k_name[] = "name"; +static const char __pyx_k_ndim[] = "ndim"; +static const char __pyx_k_pack[] = "pack"; +static const char __pyx_k_roll[] = "roll"; +static const char __pyx_k_size[] = "size"; +static const char __pyx_k_spec[] = "__spec__"; +static const char __pyx_k_step[] = "step"; +static const char __pyx_k_stop[] = "stop"; +static const char __pyx_k_test[] = "__test__"; +static const char __pyx_k_ASCII[] = "ASCII"; +static const char __pyx_k_FLOAT[] = "FLOAT"; +static const char __pyx_k_array[] = "array"; +static const char __pyx_k_class[] = "__class__"; +static const char __pyx_k_clear[] = "clear"; +static const char __pyx_k_count[] = "count"; +static const char __pyx_k_dtype[] = "dtype"; +static const char __pyx_k_error[] = "error"; +static const char __pyx_k_flags[] = "flags"; 
+static const char __pyx_k_index[] = "index"; +static const char __pyx_k_inter[] = "inter"; +static const char __pyx_k_numpy[] = "numpy"; +static const char __pyx_k_range[] = "range"; +static const char __pyx_k_rect1[] = "rect1"; +static const char __pyx_k_rect2[] = "rect2"; +static const char __pyx_k_shape[] = "shape"; +static const char __pyx_k_start[] = "start"; +static const char __pyx_k_zeros[] = "zeros"; +static const char __pyx_k_astype[] = "astype"; +static const char __pyx_k_enable[] = "enable"; +static const char __pyx_k_encode[] = "encode"; +static const char __pyx_k_format[] = "format"; +static const char __pyx_k_import[] = "__import__"; +static const char __pyx_k_name_2[] = "__name__"; +static const char __pyx_k_ninter[] = "ninter"; +static const char __pyx_k_pickle[] = "pickle"; +static const char __pyx_k_reduce[] = "__reduce__"; +static const char __pyx_k_struct[] = "struct"; +static const char __pyx_k_unpack[] = "unpack"; +static const char __pyx_k_update[] = "update"; +static const char __pyx_k_disable[] = "disable"; +static const char __pyx_k_float32[] = "float32"; +static const char __pyx_k_fortran[] = "fortran"; +static const char __pyx_k_memview[] = "memview"; +static const char __pyx_k_nums_k2[] = "nums_k2"; +static const char __pyx_k_Ellipsis[] = "Ellipsis"; +static const char __pyx_k_Sequence[] = "Sequence"; +static const char __pyx_k_getstate[] = "__getstate__"; +static const char __pyx_k_itemsize[] = "itemsize"; +static const char __pyx_k_pyx_type[] = "__pyx_type"; +static const char __pyx_k_register[] = "register"; +static const char __pyx_k_setstate[] = "__setstate__"; +static const char __pyx_k_TypeError[] = "TypeError"; +static const char __pyx_k_enumerate[] = "enumerate"; +static const char __pyx_k_inputList[] = "inputList"; +static const char __pyx_k_isenabled[] = "isenabled"; +static const char __pyx_k_pyx_state[] = "__pyx_state"; +static const char __pyx_k_reduce_ex[] = "__reduce_ex__"; +static const char __pyx_k_IndexError[] = "IndexError"; +static const char __pyx_k_ValueError[] = "ValueError"; +static const char __pyx_k_clipVertex[] = "clipVertex"; +static const char __pyx_k_outputList[] = "outputList"; +static const char __pyx_k_pyx_result[] = "__pyx_result"; +static const char __pyx_k_pyx_vtable[] = "__pyx_vtable__"; +static const char __pyx_k_ImportError[] = "ImportError"; +static const char __pyx_k_MemoryError[] = "MemoryError"; +static const char __pyx_k_PickleError[] = "PickleError"; +static const char __pyx_k_approximate[] = "approximate"; +static const char __pyx_k_clipPolygon[] = "clipPolygon"; +static const char __pyx_k_collections[] = "collections"; +static const char __pyx_k_inter_areas[] = "inter_areas"; +static const char __pyx_k_ninput_list[] = "ninput_list"; +static const char __pyx_k_initializing[] = "_initializing"; +static const char __pyx_k_inputList_np[] = "inputList_np"; +static const char __pyx_k_is_coroutine[] = "_is_coroutine"; +static const char __pyx_k_noutput_list[] = "noutput_list"; +static const char __pyx_k_pyx_checksum[] = "__pyx_checksum"; +static const char __pyx_k_stringsource[] = "<stringsource>"; +static const char __pyx_k_version_info[] = "version_info"; +static const char __pyx_k_class_getitem[] = "__class_getitem__"; +static const char __pyx_k_outputList_np[] = "outputList_np"; +static const char __pyx_k_reduce_cython[] = "__reduce_cython__"; +static const char __pyx_k_subjectVertex[] = "subjectVertex"; +static const char __pyx_k_AssertionError[] = "AssertionError"; +static const char __pyx_k_subjectPolygon[] =
"subjectPolygon"; +static const char __pyx_k_View_MemoryView[] = "View.MemoryView"; +static const char __pyx_k_allocate_buffer[] = "allocate_buffer"; +static const char __pyx_k_collections_abc[] = "collections.abc"; +static const char __pyx_k_dtype_is_object[] = "dtype_is_object"; +static const char __pyx_k_num_clip_points[] = "num_clip_points"; +static const char __pyx_k_pyx_PickleError[] = "__pyx_PickleError"; +static const char __pyx_k_setstate_cython[] = "__setstate_cython__"; +static const char __pyx_k_box_intersection[] = "box_intersection"; +static const char __pyx_k_pyx_unpickle_Enum[] = "__pyx_unpickle_Enum"; +static const char __pyx_k_asyncio_coroutines[] = "asyncio.coroutines"; +static const char __pyx_k_cline_in_traceback[] = "cline_in_traceback"; +static const char __pyx_k_polygon_clip_float[] = "polygon_clip_float"; +static const char __pyx_k_strided_and_direct[] = "<strided and direct>"; +static const char __pyx_k_computeIntersection[] = "computeIntersection"; +static const char __pyx_k_non_rot_inter_areas[] = "non_rot_inter_areas"; +static const char __pyx_k_polygon_clip_unnest[] = "polygon_clip_unnest"; +static const char __pyx_k_MAX_INTERSECT_POINTS[] = "MAX_INTERSECT_POINTS"; +static const char __pyx_k_box_intersection_pyx[] = "box_intersection.pyx"; +static const char __pyx_k_num_intersect_points[] = "num_intersect_points"; +static const char __pyx_k_strided_and_indirect[] = "<strided and indirect>"; +static const char __pyx_k_Invalid_shape_in_axis[] = "Invalid shape in axis "; +static const char __pyx_k_contiguous_and_direct[] = "<contiguous and direct>"; +static const char __pyx_k_Cannot_index_with_type[] = "Cannot index with type '"; +static const char __pyx_k_MemoryView_of_r_object[] = "<MemoryView of %r object>"; +static const char __pyx_k_MemoryView_of_r_at_0x_x[] = "<MemoryView of %r at 0x%x>"; +static const char __pyx_k_contiguous_and_indirect[] = "<contiguous and indirect>"; +static const char __pyx_k_Dimension_d_is_not_direct[] = "Dimension %d is not direct"; +static const char __pyx_k_Index_out_of_bounds_axis_d[] = "Index out of bounds (axis %d)"; +static const char __pyx_k_Step_may_not_be_zero_axis_d[] = "Step may not be zero (axis %d)"; +static const char __pyx_k_itemsize_0_for_cython_array[] = "itemsize <= 0 for cython.array"; +static const char __pyx_k_unable_to_allocate_array_data[] = "unable to allocate array data."; +static const char __pyx_k_strided_and_direct_or_indirect[] = "<strided and direct or indirect>"; +static const char __pyx_k_numpy_core_multiarray_failed_to[] = "numpy.core.multiarray failed to import"; +static const char __pyx_k_All_dimensions_preceding_dimensi[] = "All dimensions preceding dimension %d must be indexed and not sliced"; +static const char __pyx_k_Buffer_view_does_not_expose_stri[] = "Buffer view does not expose strides"; +static const char __pyx_k_Can_only_create_a_buffer_that_is[] = "Can only create a buffer that is contiguous in memory."; +static const char __pyx_k_Cannot_assign_to_read_only_memor[] = "Cannot assign to read-only memoryview"; +static const char __pyx_k_Cannot_create_writable_memory_vi[] = "Cannot create writable memory view from read-only memoryview"; +static const char __pyx_k_Cannot_transpose_memoryview_with[] = "Cannot transpose memoryview with indirect dimensions"; +static const char __pyx_k_Empty_shape_tuple_for_cython_arr[] = "Empty shape tuple for cython.array"; +static const char __pyx_k_Incompatible_checksums_0x_x_vs_0[] = "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))"; +static const char __pyx_k_Indirect_dimensions_not_supporte[] = "Indirect dimensions not supported"; +static const char __pyx_k_Invalid_mode_expected_c_or_fortr[] =
"Invalid mode, expected 'c' or 'fortran', got "; +static const char __pyx_k_Out_of_bounds_on_buffer_access_a[] = "Out of bounds on buffer access (axis "; +static const char __pyx_k_Unable_to_convert_item_to_object[] = "Unable to convert item to object"; +static const char __pyx_k_got_differing_extents_in_dimensi[] = "got differing extents in dimension "; +static const char __pyx_k_no_default___reduce___due_to_non[] = "no default __reduce__ due to non-trivial __cinit__"; +static const char __pyx_k_numpy_core_umath_failed_to_impor[] = "numpy.core.umath failed to import"; +static const char __pyx_k_unable_to_allocate_shape_and_str[] = "unable to allocate shape and strides."; +/* #### Code section: decls ### */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr); /* proto */ +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item); /* proto */ +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /* proto */ +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name); /* proto */ +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object); /* proto */ +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject 
*__pyx_v_index, PyObject *__pyx_v_value); /* proto */ +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self); /* proto */ +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state); /* proto */ +static PyObject 
*__pyx_pf_16box_intersection_computeIntersection(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_cp1, PyObject *__pyx_v_cp2, PyObject *__pyx_v_s, PyObject *__pyx_v_e); /* proto */ +static PyObject *__pyx_pf_16box_intersection_2polygon_clip_unnest(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_subjectPolygon, __Pyx_memviewslice __pyx_v_clipPolygon); /* proto */ +static PyObject *__pyx_pf_16box_intersection_4polygon_clip_float(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_subjectPolygon, __Pyx_memviewslice __pyx_v_clipPolygon); /* proto */ +static PyObject *__pyx_pf_16box_intersection_6box_intersection(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_rect1, __Pyx_memviewslice __pyx_v_rect2, __Pyx_memviewslice __pyx_v_non_rot_inter_areas, __Pyx_memviewslice __pyx_v_nums_k2, __Pyx_memviewslice __pyx_v_inter_areas, int __pyx_v_approximate); /* proto */ +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k); /*proto*/ +static __Pyx_CachedCFunction __pyx_umethod_PyList_Type_clear = {0, 0, 0, 0, 0}; +static __Pyx_CachedCFunction __pyx_umethod_PyList_Type_copy = {0, 0, 0, 0, 0}; +/* #### Code section: late_includes ### */ +/* #### Code section: module_state ### */ +typedef struct { + PyObject *__pyx_d; + PyObject *__pyx_b; + PyObject *__pyx_cython_runtime; + PyObject *__pyx_empty_tuple; + PyObject *__pyx_empty_bytes; + PyObject *__pyx_empty_unicode; + #ifdef __Pyx_CyFunction_USED + PyTypeObject *__pyx_CyFunctionType; + #endif + #ifdef __Pyx_FusedFunction_USED + PyTypeObject *__pyx_FusedFunctionType; + #endif + #ifdef __Pyx_Generator_USED + PyTypeObject *__pyx_GeneratorType; + #endif + #ifdef __Pyx_IterableCoroutine_USED + PyTypeObject *__pyx_IterableCoroutineType; + #endif + #ifdef __Pyx_Coroutine_USED + PyTypeObject *__pyx_CoroutineAwaitType; + #endif + #ifdef __Pyx_Coroutine_USED + PyTypeObject *__pyx_CoroutineType; + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + PyTypeObject *__pyx_ptype_7cpython_4type_type; + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + PyTypeObject *__pyx_ptype_5numpy_dtype; + PyTypeObject *__pyx_ptype_5numpy_flatiter; + PyTypeObject *__pyx_ptype_5numpy_broadcast; + PyTypeObject *__pyx_ptype_5numpy_ndarray; + PyTypeObject *__pyx_ptype_5numpy_generic; + PyTypeObject *__pyx_ptype_5numpy_number; + PyTypeObject *__pyx_ptype_5numpy_integer; + PyTypeObject *__pyx_ptype_5numpy_signedinteger; + PyTypeObject *__pyx_ptype_5numpy_unsignedinteger; + PyTypeObject *__pyx_ptype_5numpy_inexact; + PyTypeObject *__pyx_ptype_5numpy_floating; + PyTypeObject *__pyx_ptype_5numpy_complexfloating; + PyTypeObject *__pyx_ptype_5numpy_flexible; + PyTypeObject *__pyx_ptype_5numpy_character; + PyTypeObject *__pyx_ptype_5numpy_ufunc; + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + #endif + #if CYTHON_USE_MODULE_STATE + PyObject *__pyx_type___pyx_array; + PyObject *__pyx_type___pyx_MemviewEnum; + PyObject 
*__pyx_type___pyx_memoryview; + PyObject *__pyx_type___pyx_memoryviewslice; + #endif + PyTypeObject *__pyx_array_type; + PyTypeObject *__pyx_MemviewEnum_type; + PyTypeObject *__pyx_memoryview_type; + PyTypeObject *__pyx_memoryviewslice_type; + PyObject *__pyx_kp_u_; + PyObject *__pyx_n_s_ASCII; + PyObject *__pyx_kp_s_All_dimensions_preceding_dimensi; + PyObject *__pyx_n_s_AssertionError; + PyObject *__pyx_n_s_B; + PyObject *__pyx_kp_s_Buffer_view_does_not_expose_stri; + PyObject *__pyx_kp_s_Can_only_create_a_buffer_that_is; + PyObject *__pyx_kp_s_Cannot_assign_to_read_only_memor; + PyObject *__pyx_kp_s_Cannot_create_writable_memory_vi; + PyObject *__pyx_kp_u_Cannot_index_with_type; + PyObject *__pyx_kp_s_Cannot_transpose_memoryview_with; + PyObject *__pyx_kp_s_Dimension_d_is_not_direct; + PyObject *__pyx_n_s_Ellipsis; + PyObject *__pyx_kp_s_Empty_shape_tuple_for_cython_arr; + PyObject *__pyx_n_s_FLOAT; + PyObject *__pyx_n_s_ImportError; + PyObject *__pyx_kp_s_Incompatible_checksums_0x_x_vs_0; + PyObject *__pyx_n_s_IndexError; + PyObject *__pyx_kp_s_Index_out_of_bounds_axis_d; + PyObject *__pyx_kp_s_Indirect_dimensions_not_supporte; + PyObject *__pyx_kp_u_Invalid_mode_expected_c_or_fortr; + PyObject *__pyx_kp_u_Invalid_shape_in_axis; + PyObject *__pyx_n_s_K1; + PyObject *__pyx_n_s_K2; + PyObject *__pyx_n_s_MAX_INTERSECT_POINTS; + PyObject *__pyx_n_s_MemoryError; + PyObject *__pyx_kp_s_MemoryView_of_r_at_0x_x; + PyObject *__pyx_kp_s_MemoryView_of_r_object; + PyObject *__pyx_n_b_O; + PyObject *__pyx_kp_u_Out_of_bounds_on_buffer_access_a; + PyObject *__pyx_n_s_PickleError; + PyObject *__pyx_n_s_Sequence; + PyObject *__pyx_kp_s_Step_may_not_be_zero_axis_d; + PyObject *__pyx_n_s_TypeError; + PyObject *__pyx_kp_s_Unable_to_convert_item_to_object; + PyObject *__pyx_n_s_ValueError; + PyObject *__pyx_n_s_View_MemoryView; + PyObject *__pyx_kp_u__2; + PyObject *__pyx_n_s__3; + PyObject *__pyx_n_s__30; + PyObject *__pyx_kp_u__6; + PyObject *__pyx_kp_u__7; + PyObject *__pyx_n_s_abc; + PyObject *__pyx_n_s_abs; + PyObject *__pyx_n_s_allocate_buffer; + PyObject *__pyx_kp_u_and; + PyObject *__pyx_n_s_approximate; + PyObject *__pyx_n_s_array; + PyObject *__pyx_n_s_astype; + PyObject *__pyx_n_s_asyncio_coroutines; + PyObject *__pyx_n_s_b; + PyObject *__pyx_n_s_base; + PyObject *__pyx_n_s_box_intersection; + PyObject *__pyx_kp_s_box_intersection_pyx; + PyObject *__pyx_n_s_c; + PyObject *__pyx_n_u_c; + PyObject *__pyx_n_s_cidx; + PyObject *__pyx_n_s_class; + PyObject *__pyx_n_s_class_getitem; + PyObject *__pyx_n_s_clear; + PyObject *__pyx_n_s_cline_in_traceback; + PyObject *__pyx_n_s_clipPolygon; + PyObject *__pyx_n_s_clipVertex; + PyObject *__pyx_n_s_collections; + PyObject *__pyx_kp_s_collections_abc; + PyObject *__pyx_n_s_computeIntersection; + PyObject *__pyx_kp_s_contiguous_and_direct; + PyObject *__pyx_kp_s_contiguous_and_indirect; + PyObject *__pyx_n_s_copy; + PyObject *__pyx_n_s_count; + PyObject *__pyx_n_s_cp1; + PyObject *__pyx_n_s_cp2; + PyObject *__pyx_n_s_dc; + PyObject *__pyx_n_s_dict; + PyObject *__pyx_kp_u_disable; + PyObject *__pyx_n_s_dot; + PyObject *__pyx_n_s_dp; + PyObject *__pyx_n_s_dtype; + PyObject *__pyx_n_s_dtype_is_object; + PyObject *__pyx_n_s_e; + PyObject *__pyx_kp_u_enable; + PyObject *__pyx_n_s_encode; + PyObject *__pyx_n_s_enumerate; + PyObject *__pyx_n_s_error; + PyObject *__pyx_n_s_flags; + PyObject *__pyx_n_s_float32; + PyObject *__pyx_n_s_format; + PyObject *__pyx_n_s_fortran; + PyObject *__pyx_n_u_fortran; + PyObject *__pyx_kp_u_gc; + PyObject *__pyx_n_s_getstate; + 
PyObject *__pyx_kp_u_got; + PyObject *__pyx_kp_u_got_differing_extents_in_dimensi; + PyObject *__pyx_n_s_id; + PyObject *__pyx_n_s_iidx; + PyObject *__pyx_n_s_import; + PyObject *__pyx_n_s_inc; + PyObject *__pyx_n_s_index; + PyObject *__pyx_n_s_initializing; + PyObject *__pyx_n_s_inputList; + PyObject *__pyx_n_s_inputList_np; + PyObject *__pyx_n_s_inter; + PyObject *__pyx_n_s_inter_areas; + PyObject *__pyx_n_s_is_coroutine; + PyObject *__pyx_kp_u_isenabled; + PyObject *__pyx_n_s_itemsize; + PyObject *__pyx_kp_s_itemsize_0_for_cython_array; + PyObject *__pyx_n_s_k1; + PyObject *__pyx_n_s_k2; + PyObject *__pyx_n_s_lenc; + PyObject *__pyx_n_s_main; + PyObject *__pyx_n_s_memview; + PyObject *__pyx_n_s_mode; + PyObject *__pyx_n_s_n1; + PyObject *__pyx_n_s_n2; + PyObject *__pyx_n_s_n3; + PyObject *__pyx_n_s_name; + PyObject *__pyx_n_s_name_2; + PyObject *__pyx_n_s_ndim; + PyObject *__pyx_n_s_new; + PyObject *__pyx_n_s_ninput_list; + PyObject *__pyx_n_s_ninter; + PyObject *__pyx_kp_s_no_default___reduce___due_to_non; + PyObject *__pyx_n_s_non_rot_inter_areas; + PyObject *__pyx_n_s_noutput_list; + PyObject *__pyx_n_s_np; + PyObject *__pyx_n_s_num_clip_points; + PyObject *__pyx_n_s_num_intersect_points; + PyObject *__pyx_n_s_numpy; + PyObject *__pyx_kp_s_numpy_core_multiarray_failed_to; + PyObject *__pyx_kp_s_numpy_core_umath_failed_to_impor; + PyObject *__pyx_n_s_nums_k2; + PyObject *__pyx_n_s_obj; + PyObject *__pyx_n_s_outputList; + PyObject *__pyx_n_s_outputList_np; + PyObject *__pyx_n_s_pack; + PyObject *__pyx_n_s_pickle; + PyObject *__pyx_n_s_polygon_clip_float; + PyObject *__pyx_n_s_polygon_clip_unnest; + PyObject *__pyx_n_s_pyx_PickleError; + PyObject *__pyx_n_s_pyx_checksum; + PyObject *__pyx_n_s_pyx_result; + PyObject *__pyx_n_s_pyx_state; + PyObject *__pyx_n_s_pyx_type; + PyObject *__pyx_n_s_pyx_unpickle_Enum; + PyObject *__pyx_n_s_pyx_vtable; + PyObject *__pyx_n_s_range; + PyObject *__pyx_n_s_rect1; + PyObject *__pyx_n_s_rect2; + PyObject *__pyx_n_s_reduce; + PyObject *__pyx_n_s_reduce_cython; + PyObject *__pyx_n_s_reduce_ex; + PyObject *__pyx_n_s_register; + PyObject *__pyx_n_s_roll; + PyObject *__pyx_n_s_s; + PyObject *__pyx_n_s_setstate; + PyObject *__pyx_n_s_setstate_cython; + PyObject *__pyx_n_s_shape; + PyObject *__pyx_n_s_size; + PyObject *__pyx_n_s_spec; + PyObject *__pyx_n_s_start; + PyObject *__pyx_n_s_step; + PyObject *__pyx_n_s_stop; + PyObject *__pyx_kp_s_strided_and_direct; + PyObject *__pyx_kp_s_strided_and_direct_or_indirect; + PyObject *__pyx_kp_s_strided_and_indirect; + PyObject *__pyx_kp_s_stringsource; + PyObject *__pyx_n_s_struct; + PyObject *__pyx_n_s_subjectPolygon; + PyObject *__pyx_n_s_subjectVertex; + PyObject *__pyx_n_s_sys; + PyObject *__pyx_n_s_test; + PyObject *__pyx_kp_s_unable_to_allocate_array_data; + PyObject *__pyx_kp_s_unable_to_allocate_shape_and_str; + PyObject *__pyx_n_s_unpack; + PyObject *__pyx_n_s_update; + PyObject *__pyx_n_s_version_info; + PyObject *__pyx_n_s_x; + PyObject *__pyx_n_s_xs; + PyObject *__pyx_n_s_ys; + PyObject *__pyx_n_s_zeros; + PyObject *__pyx_float_0_5; + PyObject *__pyx_float_1_0; + PyObject *__pyx_int_0; + PyObject *__pyx_int_1; + PyObject *__pyx_int_2; + PyObject *__pyx_int_3; + PyObject *__pyx_int_112105877; + PyObject *__pyx_int_136983863; + PyObject *__pyx_int_184977713; + PyObject *__pyx_int_neg_1; + PyObject *__pyx_slice__5; + PyObject *__pyx_tuple__4; + PyObject *__pyx_tuple__8; + PyObject *__pyx_tuple__9; + PyObject *__pyx_tuple__10; + PyObject *__pyx_tuple__11; + PyObject *__pyx_tuple__12; + PyObject 
*__pyx_tuple__13; + PyObject *__pyx_tuple__14; + PyObject *__pyx_tuple__15; + PyObject *__pyx_tuple__16; + PyObject *__pyx_tuple__17; + PyObject *__pyx_tuple__18; + PyObject *__pyx_tuple__19; + PyObject *__pyx_tuple__20; + PyObject *__pyx_tuple__22; + PyObject *__pyx_tuple__24; + PyObject *__pyx_tuple__26; + PyObject *__pyx_tuple__28; + PyObject *__pyx_codeobj__21; + PyObject *__pyx_codeobj__23; + PyObject *__pyx_codeobj__25; + PyObject *__pyx_codeobj__27; + PyObject *__pyx_codeobj__29; +} __pyx_mstate; + +#if CYTHON_USE_MODULE_STATE +#ifdef __cplusplus +namespace { + extern struct PyModuleDef __pyx_moduledef; +} /* anonymous namespace */ +#else +static struct PyModuleDef __pyx_moduledef; +#endif + +#define __pyx_mstate(o) ((__pyx_mstate *)__Pyx_PyModule_GetState(o)) + +#define __pyx_mstate_global (__pyx_mstate(PyState_FindModule(&__pyx_moduledef))) + +#define __pyx_m (PyState_FindModule(&__pyx_moduledef)) +#else +static __pyx_mstate __pyx_mstate_global_static = +#ifdef __cplusplus + {}; +#else + {0}; +#endif +static __pyx_mstate *__pyx_mstate_global = &__pyx_mstate_global_static; +#endif +/* #### Code section: module_state_clear ### */ +#if CYTHON_USE_MODULE_STATE +static int __pyx_m_clear(PyObject *m) { + __pyx_mstate *clear_module_state = __pyx_mstate(m); + if (!clear_module_state) return 0; + Py_CLEAR(clear_module_state->__pyx_d); + Py_CLEAR(clear_module_state->__pyx_b); + Py_CLEAR(clear_module_state->__pyx_cython_runtime); + Py_CLEAR(clear_module_state->__pyx_empty_tuple); + Py_CLEAR(clear_module_state->__pyx_empty_bytes); + Py_CLEAR(clear_module_state->__pyx_empty_unicode); + #ifdef __Pyx_CyFunction_USED + Py_CLEAR(clear_module_state->__pyx_CyFunctionType); + #endif + #ifdef __Pyx_FusedFunction_USED + Py_CLEAR(clear_module_state->__pyx_FusedFunctionType); + #endif + Py_CLEAR(clear_module_state->__pyx_ptype_7cpython_4type_type); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_dtype); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_flatiter); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_broadcast); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_ndarray); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_generic); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_number); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_integer); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_signedinteger); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_unsignedinteger); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_inexact); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_floating); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_complexfloating); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_flexible); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_character); + Py_CLEAR(clear_module_state->__pyx_ptype_5numpy_ufunc); + Py_CLEAR(clear_module_state->__pyx_array_type); + Py_CLEAR(clear_module_state->__pyx_type___pyx_array); + Py_CLEAR(clear_module_state->__pyx_MemviewEnum_type); + Py_CLEAR(clear_module_state->__pyx_type___pyx_MemviewEnum); + Py_CLEAR(clear_module_state->__pyx_memoryview_type); + Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryview); + Py_CLEAR(clear_module_state->__pyx_memoryviewslice_type); + Py_CLEAR(clear_module_state->__pyx_type___pyx_memoryviewslice); + Py_CLEAR(clear_module_state->__pyx_kp_u_); + Py_CLEAR(clear_module_state->__pyx_n_s_ASCII); + Py_CLEAR(clear_module_state->__pyx_kp_s_All_dimensions_preceding_dimensi); + Py_CLEAR(clear_module_state->__pyx_n_s_AssertionError); + Py_CLEAR(clear_module_state->__pyx_n_s_B); + 
Py_CLEAR(clear_module_state->__pyx_kp_s_Buffer_view_does_not_expose_stri); + Py_CLEAR(clear_module_state->__pyx_kp_s_Can_only_create_a_buffer_that_is); + Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_assign_to_read_only_memor); + Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_create_writable_memory_vi); + Py_CLEAR(clear_module_state->__pyx_kp_u_Cannot_index_with_type); + Py_CLEAR(clear_module_state->__pyx_kp_s_Cannot_transpose_memoryview_with); + Py_CLEAR(clear_module_state->__pyx_kp_s_Dimension_d_is_not_direct); + Py_CLEAR(clear_module_state->__pyx_n_s_Ellipsis); + Py_CLEAR(clear_module_state->__pyx_kp_s_Empty_shape_tuple_for_cython_arr); + Py_CLEAR(clear_module_state->__pyx_n_s_FLOAT); + Py_CLEAR(clear_module_state->__pyx_n_s_ImportError); + Py_CLEAR(clear_module_state->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0); + Py_CLEAR(clear_module_state->__pyx_n_s_IndexError); + Py_CLEAR(clear_module_state->__pyx_kp_s_Index_out_of_bounds_axis_d); + Py_CLEAR(clear_module_state->__pyx_kp_s_Indirect_dimensions_not_supporte); + Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_mode_expected_c_or_fortr); + Py_CLEAR(clear_module_state->__pyx_kp_u_Invalid_shape_in_axis); + Py_CLEAR(clear_module_state->__pyx_n_s_K1); + Py_CLEAR(clear_module_state->__pyx_n_s_K2); + Py_CLEAR(clear_module_state->__pyx_n_s_MAX_INTERSECT_POINTS); + Py_CLEAR(clear_module_state->__pyx_n_s_MemoryError); + Py_CLEAR(clear_module_state->__pyx_kp_s_MemoryView_of_r_at_0x_x); + Py_CLEAR(clear_module_state->__pyx_kp_s_MemoryView_of_r_object); + Py_CLEAR(clear_module_state->__pyx_n_b_O); + Py_CLEAR(clear_module_state->__pyx_kp_u_Out_of_bounds_on_buffer_access_a); + Py_CLEAR(clear_module_state->__pyx_n_s_PickleError); + Py_CLEAR(clear_module_state->__pyx_n_s_Sequence); + Py_CLEAR(clear_module_state->__pyx_kp_s_Step_may_not_be_zero_axis_d); + Py_CLEAR(clear_module_state->__pyx_n_s_TypeError); + Py_CLEAR(clear_module_state->__pyx_kp_s_Unable_to_convert_item_to_object); + Py_CLEAR(clear_module_state->__pyx_n_s_ValueError); + Py_CLEAR(clear_module_state->__pyx_n_s_View_MemoryView); + Py_CLEAR(clear_module_state->__pyx_kp_u__2); + Py_CLEAR(clear_module_state->__pyx_n_s__3); + Py_CLEAR(clear_module_state->__pyx_n_s__30); + Py_CLEAR(clear_module_state->__pyx_kp_u__6); + Py_CLEAR(clear_module_state->__pyx_kp_u__7); + Py_CLEAR(clear_module_state->__pyx_n_s_abc); + Py_CLEAR(clear_module_state->__pyx_n_s_abs); + Py_CLEAR(clear_module_state->__pyx_n_s_allocate_buffer); + Py_CLEAR(clear_module_state->__pyx_kp_u_and); + Py_CLEAR(clear_module_state->__pyx_n_s_approximate); + Py_CLEAR(clear_module_state->__pyx_n_s_array); + Py_CLEAR(clear_module_state->__pyx_n_s_astype); + Py_CLEAR(clear_module_state->__pyx_n_s_asyncio_coroutines); + Py_CLEAR(clear_module_state->__pyx_n_s_b); + Py_CLEAR(clear_module_state->__pyx_n_s_base); + Py_CLEAR(clear_module_state->__pyx_n_s_box_intersection); + Py_CLEAR(clear_module_state->__pyx_kp_s_box_intersection_pyx); + Py_CLEAR(clear_module_state->__pyx_n_s_c); + Py_CLEAR(clear_module_state->__pyx_n_u_c); + Py_CLEAR(clear_module_state->__pyx_n_s_cidx); + Py_CLEAR(clear_module_state->__pyx_n_s_class); + Py_CLEAR(clear_module_state->__pyx_n_s_class_getitem); + Py_CLEAR(clear_module_state->__pyx_n_s_clear); + Py_CLEAR(clear_module_state->__pyx_n_s_cline_in_traceback); + Py_CLEAR(clear_module_state->__pyx_n_s_clipPolygon); + Py_CLEAR(clear_module_state->__pyx_n_s_clipVertex); + Py_CLEAR(clear_module_state->__pyx_n_s_collections); + Py_CLEAR(clear_module_state->__pyx_kp_s_collections_abc); + 
Py_CLEAR(clear_module_state->__pyx_n_s_computeIntersection); + Py_CLEAR(clear_module_state->__pyx_kp_s_contiguous_and_direct); + Py_CLEAR(clear_module_state->__pyx_kp_s_contiguous_and_indirect); + Py_CLEAR(clear_module_state->__pyx_n_s_copy); + Py_CLEAR(clear_module_state->__pyx_n_s_count); + Py_CLEAR(clear_module_state->__pyx_n_s_cp1); + Py_CLEAR(clear_module_state->__pyx_n_s_cp2); + Py_CLEAR(clear_module_state->__pyx_n_s_dc); + Py_CLEAR(clear_module_state->__pyx_n_s_dict); + Py_CLEAR(clear_module_state->__pyx_kp_u_disable); + Py_CLEAR(clear_module_state->__pyx_n_s_dot); + Py_CLEAR(clear_module_state->__pyx_n_s_dp); + Py_CLEAR(clear_module_state->__pyx_n_s_dtype); + Py_CLEAR(clear_module_state->__pyx_n_s_dtype_is_object); + Py_CLEAR(clear_module_state->__pyx_n_s_e); + Py_CLEAR(clear_module_state->__pyx_kp_u_enable); + Py_CLEAR(clear_module_state->__pyx_n_s_encode); + Py_CLEAR(clear_module_state->__pyx_n_s_enumerate); + Py_CLEAR(clear_module_state->__pyx_n_s_error); + Py_CLEAR(clear_module_state->__pyx_n_s_flags); + Py_CLEAR(clear_module_state->__pyx_n_s_float32); + Py_CLEAR(clear_module_state->__pyx_n_s_format); + Py_CLEAR(clear_module_state->__pyx_n_s_fortran); + Py_CLEAR(clear_module_state->__pyx_n_u_fortran); + Py_CLEAR(clear_module_state->__pyx_kp_u_gc); + Py_CLEAR(clear_module_state->__pyx_n_s_getstate); + Py_CLEAR(clear_module_state->__pyx_kp_u_got); + Py_CLEAR(clear_module_state->__pyx_kp_u_got_differing_extents_in_dimensi); + Py_CLEAR(clear_module_state->__pyx_n_s_id); + Py_CLEAR(clear_module_state->__pyx_n_s_iidx); + Py_CLEAR(clear_module_state->__pyx_n_s_import); + Py_CLEAR(clear_module_state->__pyx_n_s_inc); + Py_CLEAR(clear_module_state->__pyx_n_s_index); + Py_CLEAR(clear_module_state->__pyx_n_s_initializing); + Py_CLEAR(clear_module_state->__pyx_n_s_inputList); + Py_CLEAR(clear_module_state->__pyx_n_s_inputList_np); + Py_CLEAR(clear_module_state->__pyx_n_s_inter); + Py_CLEAR(clear_module_state->__pyx_n_s_inter_areas); + Py_CLEAR(clear_module_state->__pyx_n_s_is_coroutine); + Py_CLEAR(clear_module_state->__pyx_kp_u_isenabled); + Py_CLEAR(clear_module_state->__pyx_n_s_itemsize); + Py_CLEAR(clear_module_state->__pyx_kp_s_itemsize_0_for_cython_array); + Py_CLEAR(clear_module_state->__pyx_n_s_k1); + Py_CLEAR(clear_module_state->__pyx_n_s_k2); + Py_CLEAR(clear_module_state->__pyx_n_s_lenc); + Py_CLEAR(clear_module_state->__pyx_n_s_main); + Py_CLEAR(clear_module_state->__pyx_n_s_memview); + Py_CLEAR(clear_module_state->__pyx_n_s_mode); + Py_CLEAR(clear_module_state->__pyx_n_s_n1); + Py_CLEAR(clear_module_state->__pyx_n_s_n2); + Py_CLEAR(clear_module_state->__pyx_n_s_n3); + Py_CLEAR(clear_module_state->__pyx_n_s_name); + Py_CLEAR(clear_module_state->__pyx_n_s_name_2); + Py_CLEAR(clear_module_state->__pyx_n_s_ndim); + Py_CLEAR(clear_module_state->__pyx_n_s_new); + Py_CLEAR(clear_module_state->__pyx_n_s_ninput_list); + Py_CLEAR(clear_module_state->__pyx_n_s_ninter); + Py_CLEAR(clear_module_state->__pyx_kp_s_no_default___reduce___due_to_non); + Py_CLEAR(clear_module_state->__pyx_n_s_non_rot_inter_areas); + Py_CLEAR(clear_module_state->__pyx_n_s_noutput_list); + Py_CLEAR(clear_module_state->__pyx_n_s_np); + Py_CLEAR(clear_module_state->__pyx_n_s_num_clip_points); + Py_CLEAR(clear_module_state->__pyx_n_s_num_intersect_points); + Py_CLEAR(clear_module_state->__pyx_n_s_numpy); + Py_CLEAR(clear_module_state->__pyx_kp_s_numpy_core_multiarray_failed_to); + Py_CLEAR(clear_module_state->__pyx_kp_s_numpy_core_umath_failed_to_impor); + Py_CLEAR(clear_module_state->__pyx_n_s_nums_k2); + 
Py_CLEAR(clear_module_state->__pyx_n_s_obj); + Py_CLEAR(clear_module_state->__pyx_n_s_outputList); + Py_CLEAR(clear_module_state->__pyx_n_s_outputList_np); + Py_CLEAR(clear_module_state->__pyx_n_s_pack); + Py_CLEAR(clear_module_state->__pyx_n_s_pickle); + Py_CLEAR(clear_module_state->__pyx_n_s_polygon_clip_float); + Py_CLEAR(clear_module_state->__pyx_n_s_polygon_clip_unnest); + Py_CLEAR(clear_module_state->__pyx_n_s_pyx_PickleError); + Py_CLEAR(clear_module_state->__pyx_n_s_pyx_checksum); + Py_CLEAR(clear_module_state->__pyx_n_s_pyx_result); + Py_CLEAR(clear_module_state->__pyx_n_s_pyx_state); + Py_CLEAR(clear_module_state->__pyx_n_s_pyx_type); + Py_CLEAR(clear_module_state->__pyx_n_s_pyx_unpickle_Enum); + Py_CLEAR(clear_module_state->__pyx_n_s_pyx_vtable); + Py_CLEAR(clear_module_state->__pyx_n_s_range); + Py_CLEAR(clear_module_state->__pyx_n_s_rect1); + Py_CLEAR(clear_module_state->__pyx_n_s_rect2); + Py_CLEAR(clear_module_state->__pyx_n_s_reduce); + Py_CLEAR(clear_module_state->__pyx_n_s_reduce_cython); + Py_CLEAR(clear_module_state->__pyx_n_s_reduce_ex); + Py_CLEAR(clear_module_state->__pyx_n_s_register); + Py_CLEAR(clear_module_state->__pyx_n_s_roll); + Py_CLEAR(clear_module_state->__pyx_n_s_s); + Py_CLEAR(clear_module_state->__pyx_n_s_setstate); + Py_CLEAR(clear_module_state->__pyx_n_s_setstate_cython); + Py_CLEAR(clear_module_state->__pyx_n_s_shape); + Py_CLEAR(clear_module_state->__pyx_n_s_size); + Py_CLEAR(clear_module_state->__pyx_n_s_spec); + Py_CLEAR(clear_module_state->__pyx_n_s_start); + Py_CLEAR(clear_module_state->__pyx_n_s_step); + Py_CLEAR(clear_module_state->__pyx_n_s_stop); + Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_direct); + Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_direct_or_indirect); + Py_CLEAR(clear_module_state->__pyx_kp_s_strided_and_indirect); + Py_CLEAR(clear_module_state->__pyx_kp_s_stringsource); + Py_CLEAR(clear_module_state->__pyx_n_s_struct); + Py_CLEAR(clear_module_state->__pyx_n_s_subjectPolygon); + Py_CLEAR(clear_module_state->__pyx_n_s_subjectVertex); + Py_CLEAR(clear_module_state->__pyx_n_s_sys); + Py_CLEAR(clear_module_state->__pyx_n_s_test); + Py_CLEAR(clear_module_state->__pyx_kp_s_unable_to_allocate_array_data); + Py_CLEAR(clear_module_state->__pyx_kp_s_unable_to_allocate_shape_and_str); + Py_CLEAR(clear_module_state->__pyx_n_s_unpack); + Py_CLEAR(clear_module_state->__pyx_n_s_update); + Py_CLEAR(clear_module_state->__pyx_n_s_version_info); + Py_CLEAR(clear_module_state->__pyx_n_s_x); + Py_CLEAR(clear_module_state->__pyx_n_s_xs); + Py_CLEAR(clear_module_state->__pyx_n_s_ys); + Py_CLEAR(clear_module_state->__pyx_n_s_zeros); + Py_CLEAR(clear_module_state->__pyx_float_0_5); + Py_CLEAR(clear_module_state->__pyx_float_1_0); + Py_CLEAR(clear_module_state->__pyx_int_0); + Py_CLEAR(clear_module_state->__pyx_int_1); + Py_CLEAR(clear_module_state->__pyx_int_2); + Py_CLEAR(clear_module_state->__pyx_int_3); + Py_CLEAR(clear_module_state->__pyx_int_112105877); + Py_CLEAR(clear_module_state->__pyx_int_136983863); + Py_CLEAR(clear_module_state->__pyx_int_184977713); + Py_CLEAR(clear_module_state->__pyx_int_neg_1); + Py_CLEAR(clear_module_state->__pyx_slice__5); + Py_CLEAR(clear_module_state->__pyx_tuple__4); + Py_CLEAR(clear_module_state->__pyx_tuple__8); + Py_CLEAR(clear_module_state->__pyx_tuple__9); + Py_CLEAR(clear_module_state->__pyx_tuple__10); + Py_CLEAR(clear_module_state->__pyx_tuple__11); + Py_CLEAR(clear_module_state->__pyx_tuple__12); + Py_CLEAR(clear_module_state->__pyx_tuple__13); + 
Py_CLEAR(clear_module_state->__pyx_tuple__14); + Py_CLEAR(clear_module_state->__pyx_tuple__15); + Py_CLEAR(clear_module_state->__pyx_tuple__16); + Py_CLEAR(clear_module_state->__pyx_tuple__17); + Py_CLEAR(clear_module_state->__pyx_tuple__18); + Py_CLEAR(clear_module_state->__pyx_tuple__19); + Py_CLEAR(clear_module_state->__pyx_tuple__20); + Py_CLEAR(clear_module_state->__pyx_tuple__22); + Py_CLEAR(clear_module_state->__pyx_tuple__24); + Py_CLEAR(clear_module_state->__pyx_tuple__26); + Py_CLEAR(clear_module_state->__pyx_tuple__28); + Py_CLEAR(clear_module_state->__pyx_codeobj__21); + Py_CLEAR(clear_module_state->__pyx_codeobj__23); + Py_CLEAR(clear_module_state->__pyx_codeobj__25); + Py_CLEAR(clear_module_state->__pyx_codeobj__27); + Py_CLEAR(clear_module_state->__pyx_codeobj__29); + return 0; +} +#endif +/* #### Code section: module_state_traverse ### */ +#if CYTHON_USE_MODULE_STATE +static int __pyx_m_traverse(PyObject *m, visitproc visit, void *arg) { + __pyx_mstate *traverse_module_state = __pyx_mstate(m); + if (!traverse_module_state) return 0; + Py_VISIT(traverse_module_state->__pyx_d); + Py_VISIT(traverse_module_state->__pyx_b); + Py_VISIT(traverse_module_state->__pyx_cython_runtime); + Py_VISIT(traverse_module_state->__pyx_empty_tuple); + Py_VISIT(traverse_module_state->__pyx_empty_bytes); + Py_VISIT(traverse_module_state->__pyx_empty_unicode); + #ifdef __Pyx_CyFunction_USED + Py_VISIT(traverse_module_state->__pyx_CyFunctionType); + #endif + #ifdef __Pyx_FusedFunction_USED + Py_VISIT(traverse_module_state->__pyx_FusedFunctionType); + #endif + Py_VISIT(traverse_module_state->__pyx_ptype_7cpython_4type_type); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_dtype); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_flatiter); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_broadcast); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_ndarray); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_generic); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_number); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_integer); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_signedinteger); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_unsignedinteger); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_inexact); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_floating); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_complexfloating); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_flexible); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_character); + Py_VISIT(traverse_module_state->__pyx_ptype_5numpy_ufunc); + Py_VISIT(traverse_module_state->__pyx_array_type); + Py_VISIT(traverse_module_state->__pyx_type___pyx_array); + Py_VISIT(traverse_module_state->__pyx_MemviewEnum_type); + Py_VISIT(traverse_module_state->__pyx_type___pyx_MemviewEnum); + Py_VISIT(traverse_module_state->__pyx_memoryview_type); + Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryview); + Py_VISIT(traverse_module_state->__pyx_memoryviewslice_type); + Py_VISIT(traverse_module_state->__pyx_type___pyx_memoryviewslice); + Py_VISIT(traverse_module_state->__pyx_kp_u_); + Py_VISIT(traverse_module_state->__pyx_n_s_ASCII); + Py_VISIT(traverse_module_state->__pyx_kp_s_All_dimensions_preceding_dimensi); + Py_VISIT(traverse_module_state->__pyx_n_s_AssertionError); + Py_VISIT(traverse_module_state->__pyx_n_s_B); + Py_VISIT(traverse_module_state->__pyx_kp_s_Buffer_view_does_not_expose_stri); + Py_VISIT(traverse_module_state->__pyx_kp_s_Can_only_create_a_buffer_that_is); + 
Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_assign_to_read_only_memor); + Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_create_writable_memory_vi); + Py_VISIT(traverse_module_state->__pyx_kp_u_Cannot_index_with_type); + Py_VISIT(traverse_module_state->__pyx_kp_s_Cannot_transpose_memoryview_with); + Py_VISIT(traverse_module_state->__pyx_kp_s_Dimension_d_is_not_direct); + Py_VISIT(traverse_module_state->__pyx_n_s_Ellipsis); + Py_VISIT(traverse_module_state->__pyx_kp_s_Empty_shape_tuple_for_cython_arr); + Py_VISIT(traverse_module_state->__pyx_n_s_FLOAT); + Py_VISIT(traverse_module_state->__pyx_n_s_ImportError); + Py_VISIT(traverse_module_state->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0); + Py_VISIT(traverse_module_state->__pyx_n_s_IndexError); + Py_VISIT(traverse_module_state->__pyx_kp_s_Index_out_of_bounds_axis_d); + Py_VISIT(traverse_module_state->__pyx_kp_s_Indirect_dimensions_not_supporte); + Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_mode_expected_c_or_fortr); + Py_VISIT(traverse_module_state->__pyx_kp_u_Invalid_shape_in_axis); + Py_VISIT(traverse_module_state->__pyx_n_s_K1); + Py_VISIT(traverse_module_state->__pyx_n_s_K2); + Py_VISIT(traverse_module_state->__pyx_n_s_MAX_INTERSECT_POINTS); + Py_VISIT(traverse_module_state->__pyx_n_s_MemoryError); + Py_VISIT(traverse_module_state->__pyx_kp_s_MemoryView_of_r_at_0x_x); + Py_VISIT(traverse_module_state->__pyx_kp_s_MemoryView_of_r_object); + Py_VISIT(traverse_module_state->__pyx_n_b_O); + Py_VISIT(traverse_module_state->__pyx_kp_u_Out_of_bounds_on_buffer_access_a); + Py_VISIT(traverse_module_state->__pyx_n_s_PickleError); + Py_VISIT(traverse_module_state->__pyx_n_s_Sequence); + Py_VISIT(traverse_module_state->__pyx_kp_s_Step_may_not_be_zero_axis_d); + Py_VISIT(traverse_module_state->__pyx_n_s_TypeError); + Py_VISIT(traverse_module_state->__pyx_kp_s_Unable_to_convert_item_to_object); + Py_VISIT(traverse_module_state->__pyx_n_s_ValueError); + Py_VISIT(traverse_module_state->__pyx_n_s_View_MemoryView); + Py_VISIT(traverse_module_state->__pyx_kp_u__2); + Py_VISIT(traverse_module_state->__pyx_n_s__3); + Py_VISIT(traverse_module_state->__pyx_n_s__30); + Py_VISIT(traverse_module_state->__pyx_kp_u__6); + Py_VISIT(traverse_module_state->__pyx_kp_u__7); + Py_VISIT(traverse_module_state->__pyx_n_s_abc); + Py_VISIT(traverse_module_state->__pyx_n_s_abs); + Py_VISIT(traverse_module_state->__pyx_n_s_allocate_buffer); + Py_VISIT(traverse_module_state->__pyx_kp_u_and); + Py_VISIT(traverse_module_state->__pyx_n_s_approximate); + Py_VISIT(traverse_module_state->__pyx_n_s_array); + Py_VISIT(traverse_module_state->__pyx_n_s_astype); + Py_VISIT(traverse_module_state->__pyx_n_s_asyncio_coroutines); + Py_VISIT(traverse_module_state->__pyx_n_s_b); + Py_VISIT(traverse_module_state->__pyx_n_s_base); + Py_VISIT(traverse_module_state->__pyx_n_s_box_intersection); + Py_VISIT(traverse_module_state->__pyx_kp_s_box_intersection_pyx); + Py_VISIT(traverse_module_state->__pyx_n_s_c); + Py_VISIT(traverse_module_state->__pyx_n_u_c); + Py_VISIT(traverse_module_state->__pyx_n_s_cidx); + Py_VISIT(traverse_module_state->__pyx_n_s_class); + Py_VISIT(traverse_module_state->__pyx_n_s_class_getitem); + Py_VISIT(traverse_module_state->__pyx_n_s_clear); + Py_VISIT(traverse_module_state->__pyx_n_s_cline_in_traceback); + Py_VISIT(traverse_module_state->__pyx_n_s_clipPolygon); + Py_VISIT(traverse_module_state->__pyx_n_s_clipVertex); + Py_VISIT(traverse_module_state->__pyx_n_s_collections); + Py_VISIT(traverse_module_state->__pyx_kp_s_collections_abc); + 
Py_VISIT(traverse_module_state->__pyx_n_s_computeIntersection); + Py_VISIT(traverse_module_state->__pyx_kp_s_contiguous_and_direct); + Py_VISIT(traverse_module_state->__pyx_kp_s_contiguous_and_indirect); + Py_VISIT(traverse_module_state->__pyx_n_s_copy); + Py_VISIT(traverse_module_state->__pyx_n_s_count); + Py_VISIT(traverse_module_state->__pyx_n_s_cp1); + Py_VISIT(traverse_module_state->__pyx_n_s_cp2); + Py_VISIT(traverse_module_state->__pyx_n_s_dc); + Py_VISIT(traverse_module_state->__pyx_n_s_dict); + Py_VISIT(traverse_module_state->__pyx_kp_u_disable); + Py_VISIT(traverse_module_state->__pyx_n_s_dot); + Py_VISIT(traverse_module_state->__pyx_n_s_dp); + Py_VISIT(traverse_module_state->__pyx_n_s_dtype); + Py_VISIT(traverse_module_state->__pyx_n_s_dtype_is_object); + Py_VISIT(traverse_module_state->__pyx_n_s_e); + Py_VISIT(traverse_module_state->__pyx_kp_u_enable); + Py_VISIT(traverse_module_state->__pyx_n_s_encode); + Py_VISIT(traverse_module_state->__pyx_n_s_enumerate); + Py_VISIT(traverse_module_state->__pyx_n_s_error); + Py_VISIT(traverse_module_state->__pyx_n_s_flags); + Py_VISIT(traverse_module_state->__pyx_n_s_float32); + Py_VISIT(traverse_module_state->__pyx_n_s_format); + Py_VISIT(traverse_module_state->__pyx_n_s_fortran); + Py_VISIT(traverse_module_state->__pyx_n_u_fortran); + Py_VISIT(traverse_module_state->__pyx_kp_u_gc); + Py_VISIT(traverse_module_state->__pyx_n_s_getstate); + Py_VISIT(traverse_module_state->__pyx_kp_u_got); + Py_VISIT(traverse_module_state->__pyx_kp_u_got_differing_extents_in_dimensi); + Py_VISIT(traverse_module_state->__pyx_n_s_id); + Py_VISIT(traverse_module_state->__pyx_n_s_iidx); + Py_VISIT(traverse_module_state->__pyx_n_s_import); + Py_VISIT(traverse_module_state->__pyx_n_s_inc); + Py_VISIT(traverse_module_state->__pyx_n_s_index); + Py_VISIT(traverse_module_state->__pyx_n_s_initializing); + Py_VISIT(traverse_module_state->__pyx_n_s_inputList); + Py_VISIT(traverse_module_state->__pyx_n_s_inputList_np); + Py_VISIT(traverse_module_state->__pyx_n_s_inter); + Py_VISIT(traverse_module_state->__pyx_n_s_inter_areas); + Py_VISIT(traverse_module_state->__pyx_n_s_is_coroutine); + Py_VISIT(traverse_module_state->__pyx_kp_u_isenabled); + Py_VISIT(traverse_module_state->__pyx_n_s_itemsize); + Py_VISIT(traverse_module_state->__pyx_kp_s_itemsize_0_for_cython_array); + Py_VISIT(traverse_module_state->__pyx_n_s_k1); + Py_VISIT(traverse_module_state->__pyx_n_s_k2); + Py_VISIT(traverse_module_state->__pyx_n_s_lenc); + Py_VISIT(traverse_module_state->__pyx_n_s_main); + Py_VISIT(traverse_module_state->__pyx_n_s_memview); + Py_VISIT(traverse_module_state->__pyx_n_s_mode); + Py_VISIT(traverse_module_state->__pyx_n_s_n1); + Py_VISIT(traverse_module_state->__pyx_n_s_n2); + Py_VISIT(traverse_module_state->__pyx_n_s_n3); + Py_VISIT(traverse_module_state->__pyx_n_s_name); + Py_VISIT(traverse_module_state->__pyx_n_s_name_2); + Py_VISIT(traverse_module_state->__pyx_n_s_ndim); + Py_VISIT(traverse_module_state->__pyx_n_s_new); + Py_VISIT(traverse_module_state->__pyx_n_s_ninput_list); + Py_VISIT(traverse_module_state->__pyx_n_s_ninter); + Py_VISIT(traverse_module_state->__pyx_kp_s_no_default___reduce___due_to_non); + Py_VISIT(traverse_module_state->__pyx_n_s_non_rot_inter_areas); + Py_VISIT(traverse_module_state->__pyx_n_s_noutput_list); + Py_VISIT(traverse_module_state->__pyx_n_s_np); + Py_VISIT(traverse_module_state->__pyx_n_s_num_clip_points); + Py_VISIT(traverse_module_state->__pyx_n_s_num_intersect_points); + Py_VISIT(traverse_module_state->__pyx_n_s_numpy); + 
Py_VISIT(traverse_module_state->__pyx_kp_s_numpy_core_multiarray_failed_to); + Py_VISIT(traverse_module_state->__pyx_kp_s_numpy_core_umath_failed_to_impor); + Py_VISIT(traverse_module_state->__pyx_n_s_nums_k2); + Py_VISIT(traverse_module_state->__pyx_n_s_obj); + Py_VISIT(traverse_module_state->__pyx_n_s_outputList); + Py_VISIT(traverse_module_state->__pyx_n_s_outputList_np); + Py_VISIT(traverse_module_state->__pyx_n_s_pack); + Py_VISIT(traverse_module_state->__pyx_n_s_pickle); + Py_VISIT(traverse_module_state->__pyx_n_s_polygon_clip_float); + Py_VISIT(traverse_module_state->__pyx_n_s_polygon_clip_unnest); + Py_VISIT(traverse_module_state->__pyx_n_s_pyx_PickleError); + Py_VISIT(traverse_module_state->__pyx_n_s_pyx_checksum); + Py_VISIT(traverse_module_state->__pyx_n_s_pyx_result); + Py_VISIT(traverse_module_state->__pyx_n_s_pyx_state); + Py_VISIT(traverse_module_state->__pyx_n_s_pyx_type); + Py_VISIT(traverse_module_state->__pyx_n_s_pyx_unpickle_Enum); + Py_VISIT(traverse_module_state->__pyx_n_s_pyx_vtable); + Py_VISIT(traverse_module_state->__pyx_n_s_range); + Py_VISIT(traverse_module_state->__pyx_n_s_rect1); + Py_VISIT(traverse_module_state->__pyx_n_s_rect2); + Py_VISIT(traverse_module_state->__pyx_n_s_reduce); + Py_VISIT(traverse_module_state->__pyx_n_s_reduce_cython); + Py_VISIT(traverse_module_state->__pyx_n_s_reduce_ex); + Py_VISIT(traverse_module_state->__pyx_n_s_register); + Py_VISIT(traverse_module_state->__pyx_n_s_roll); + Py_VISIT(traverse_module_state->__pyx_n_s_s); + Py_VISIT(traverse_module_state->__pyx_n_s_setstate); + Py_VISIT(traverse_module_state->__pyx_n_s_setstate_cython); + Py_VISIT(traverse_module_state->__pyx_n_s_shape); + Py_VISIT(traverse_module_state->__pyx_n_s_size); + Py_VISIT(traverse_module_state->__pyx_n_s_spec); + Py_VISIT(traverse_module_state->__pyx_n_s_start); + Py_VISIT(traverse_module_state->__pyx_n_s_step); + Py_VISIT(traverse_module_state->__pyx_n_s_stop); + Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_direct); + Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_direct_or_indirect); + Py_VISIT(traverse_module_state->__pyx_kp_s_strided_and_indirect); + Py_VISIT(traverse_module_state->__pyx_kp_s_stringsource); + Py_VISIT(traverse_module_state->__pyx_n_s_struct); + Py_VISIT(traverse_module_state->__pyx_n_s_subjectPolygon); + Py_VISIT(traverse_module_state->__pyx_n_s_subjectVertex); + Py_VISIT(traverse_module_state->__pyx_n_s_sys); + Py_VISIT(traverse_module_state->__pyx_n_s_test); + Py_VISIT(traverse_module_state->__pyx_kp_s_unable_to_allocate_array_data); + Py_VISIT(traverse_module_state->__pyx_kp_s_unable_to_allocate_shape_and_str); + Py_VISIT(traverse_module_state->__pyx_n_s_unpack); + Py_VISIT(traverse_module_state->__pyx_n_s_update); + Py_VISIT(traverse_module_state->__pyx_n_s_version_info); + Py_VISIT(traverse_module_state->__pyx_n_s_x); + Py_VISIT(traverse_module_state->__pyx_n_s_xs); + Py_VISIT(traverse_module_state->__pyx_n_s_ys); + Py_VISIT(traverse_module_state->__pyx_n_s_zeros); + Py_VISIT(traverse_module_state->__pyx_float_0_5); + Py_VISIT(traverse_module_state->__pyx_float_1_0); + Py_VISIT(traverse_module_state->__pyx_int_0); + Py_VISIT(traverse_module_state->__pyx_int_1); + Py_VISIT(traverse_module_state->__pyx_int_2); + Py_VISIT(traverse_module_state->__pyx_int_3); + Py_VISIT(traverse_module_state->__pyx_int_112105877); + Py_VISIT(traverse_module_state->__pyx_int_136983863); + Py_VISIT(traverse_module_state->__pyx_int_184977713); + Py_VISIT(traverse_module_state->__pyx_int_neg_1); + 
Py_VISIT(traverse_module_state->__pyx_slice__5); + Py_VISIT(traverse_module_state->__pyx_tuple__4); + Py_VISIT(traverse_module_state->__pyx_tuple__8); + Py_VISIT(traverse_module_state->__pyx_tuple__9); + Py_VISIT(traverse_module_state->__pyx_tuple__10); + Py_VISIT(traverse_module_state->__pyx_tuple__11); + Py_VISIT(traverse_module_state->__pyx_tuple__12); + Py_VISIT(traverse_module_state->__pyx_tuple__13); + Py_VISIT(traverse_module_state->__pyx_tuple__14); + Py_VISIT(traverse_module_state->__pyx_tuple__15); + Py_VISIT(traverse_module_state->__pyx_tuple__16); + Py_VISIT(traverse_module_state->__pyx_tuple__17); + Py_VISIT(traverse_module_state->__pyx_tuple__18); + Py_VISIT(traverse_module_state->__pyx_tuple__19); + Py_VISIT(traverse_module_state->__pyx_tuple__20); + Py_VISIT(traverse_module_state->__pyx_tuple__22); + Py_VISIT(traverse_module_state->__pyx_tuple__24); + Py_VISIT(traverse_module_state->__pyx_tuple__26); + Py_VISIT(traverse_module_state->__pyx_tuple__28); + Py_VISIT(traverse_module_state->__pyx_codeobj__21); + Py_VISIT(traverse_module_state->__pyx_codeobj__23); + Py_VISIT(traverse_module_state->__pyx_codeobj__25); + Py_VISIT(traverse_module_state->__pyx_codeobj__27); + Py_VISIT(traverse_module_state->__pyx_codeobj__29); + return 0; +} +#endif +/* #### Code section: module_state_defines ### */ +#define __pyx_d __pyx_mstate_global->__pyx_d +#define __pyx_b __pyx_mstate_global->__pyx_b +#define __pyx_cython_runtime __pyx_mstate_global->__pyx_cython_runtime +#define __pyx_empty_tuple __pyx_mstate_global->__pyx_empty_tuple +#define __pyx_empty_bytes __pyx_mstate_global->__pyx_empty_bytes +#define __pyx_empty_unicode __pyx_mstate_global->__pyx_empty_unicode +#ifdef __Pyx_CyFunction_USED +#define __pyx_CyFunctionType __pyx_mstate_global->__pyx_CyFunctionType +#endif +#ifdef __Pyx_FusedFunction_USED +#define __pyx_FusedFunctionType __pyx_mstate_global->__pyx_FusedFunctionType +#endif +#ifdef __Pyx_Generator_USED +#define __pyx_GeneratorType __pyx_mstate_global->__pyx_GeneratorType +#endif +#ifdef __Pyx_IterableCoroutine_USED +#define __pyx_IterableCoroutineType __pyx_mstate_global->__pyx_IterableCoroutineType +#endif +#ifdef __Pyx_Coroutine_USED +#define __pyx_CoroutineAwaitType __pyx_mstate_global->__pyx_CoroutineAwaitType +#endif +#ifdef __Pyx_Coroutine_USED +#define __pyx_CoroutineType __pyx_mstate_global->__pyx_CoroutineType +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#define __pyx_ptype_7cpython_4type_type __pyx_mstate_global->__pyx_ptype_7cpython_4type_type +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#define __pyx_ptype_5numpy_dtype __pyx_mstate_global->__pyx_ptype_5numpy_dtype +#define __pyx_ptype_5numpy_flatiter __pyx_mstate_global->__pyx_ptype_5numpy_flatiter +#define __pyx_ptype_5numpy_broadcast __pyx_mstate_global->__pyx_ptype_5numpy_broadcast +#define __pyx_ptype_5numpy_ndarray __pyx_mstate_global->__pyx_ptype_5numpy_ndarray +#define __pyx_ptype_5numpy_generic __pyx_mstate_global->__pyx_ptype_5numpy_generic +#define __pyx_ptype_5numpy_number __pyx_mstate_global->__pyx_ptype_5numpy_number +#define __pyx_ptype_5numpy_integer __pyx_mstate_global->__pyx_ptype_5numpy_integer +#define __pyx_ptype_5numpy_signedinteger __pyx_mstate_global->__pyx_ptype_5numpy_signedinteger +#define __pyx_ptype_5numpy_unsignedinteger 
__pyx_mstate_global->__pyx_ptype_5numpy_unsignedinteger +#define __pyx_ptype_5numpy_inexact __pyx_mstate_global->__pyx_ptype_5numpy_inexact +#define __pyx_ptype_5numpy_floating __pyx_mstate_global->__pyx_ptype_5numpy_floating +#define __pyx_ptype_5numpy_complexfloating __pyx_mstate_global->__pyx_ptype_5numpy_complexfloating +#define __pyx_ptype_5numpy_flexible __pyx_mstate_global->__pyx_ptype_5numpy_flexible +#define __pyx_ptype_5numpy_character __pyx_mstate_global->__pyx_ptype_5numpy_character +#define __pyx_ptype_5numpy_ufunc __pyx_mstate_global->__pyx_ptype_5numpy_ufunc +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#endif +#if CYTHON_USE_MODULE_STATE +#define __pyx_type___pyx_array __pyx_mstate_global->__pyx_type___pyx_array +#define __pyx_type___pyx_MemviewEnum __pyx_mstate_global->__pyx_type___pyx_MemviewEnum +#define __pyx_type___pyx_memoryview __pyx_mstate_global->__pyx_type___pyx_memoryview +#define __pyx_type___pyx_memoryviewslice __pyx_mstate_global->__pyx_type___pyx_memoryviewslice +#endif +#define __pyx_array_type __pyx_mstate_global->__pyx_array_type +#define __pyx_MemviewEnum_type __pyx_mstate_global->__pyx_MemviewEnum_type +#define __pyx_memoryview_type __pyx_mstate_global->__pyx_memoryview_type +#define __pyx_memoryviewslice_type __pyx_mstate_global->__pyx_memoryviewslice_type +#define __pyx_kp_u_ __pyx_mstate_global->__pyx_kp_u_ +#define __pyx_n_s_ASCII __pyx_mstate_global->__pyx_n_s_ASCII +#define __pyx_kp_s_All_dimensions_preceding_dimensi __pyx_mstate_global->__pyx_kp_s_All_dimensions_preceding_dimensi +#define __pyx_n_s_AssertionError __pyx_mstate_global->__pyx_n_s_AssertionError +#define __pyx_n_s_B __pyx_mstate_global->__pyx_n_s_B +#define __pyx_kp_s_Buffer_view_does_not_expose_stri __pyx_mstate_global->__pyx_kp_s_Buffer_view_does_not_expose_stri +#define __pyx_kp_s_Can_only_create_a_buffer_that_is __pyx_mstate_global->__pyx_kp_s_Can_only_create_a_buffer_that_is +#define __pyx_kp_s_Cannot_assign_to_read_only_memor __pyx_mstate_global->__pyx_kp_s_Cannot_assign_to_read_only_memor +#define __pyx_kp_s_Cannot_create_writable_memory_vi __pyx_mstate_global->__pyx_kp_s_Cannot_create_writable_memory_vi +#define __pyx_kp_u_Cannot_index_with_type __pyx_mstate_global->__pyx_kp_u_Cannot_index_with_type +#define __pyx_kp_s_Cannot_transpose_memoryview_with __pyx_mstate_global->__pyx_kp_s_Cannot_transpose_memoryview_with +#define __pyx_kp_s_Dimension_d_is_not_direct __pyx_mstate_global->__pyx_kp_s_Dimension_d_is_not_direct +#define __pyx_n_s_Ellipsis __pyx_mstate_global->__pyx_n_s_Ellipsis +#define __pyx_kp_s_Empty_shape_tuple_for_cython_arr __pyx_mstate_global->__pyx_kp_s_Empty_shape_tuple_for_cython_arr +#define __pyx_n_s_FLOAT __pyx_mstate_global->__pyx_n_s_FLOAT +#define __pyx_n_s_ImportError __pyx_mstate_global->__pyx_n_s_ImportError +#define __pyx_kp_s_Incompatible_checksums_0x_x_vs_0 __pyx_mstate_global->__pyx_kp_s_Incompatible_checksums_0x_x_vs_0 +#define __pyx_n_s_IndexError __pyx_mstate_global->__pyx_n_s_IndexError +#define __pyx_kp_s_Index_out_of_bounds_axis_d __pyx_mstate_global->__pyx_kp_s_Index_out_of_bounds_axis_d +#define __pyx_kp_s_Indirect_dimensions_not_supporte __pyx_mstate_global->__pyx_kp_s_Indirect_dimensions_not_supporte +#define __pyx_kp_u_Invalid_mode_expected_c_or_fortr __pyx_mstate_global->__pyx_kp_u_Invalid_mode_expected_c_or_fortr +#define __pyx_kp_u_Invalid_shape_in_axis __pyx_mstate_global->__pyx_kp_u_Invalid_shape_in_axis +#define __pyx_n_s_K1 __pyx_mstate_global->__pyx_n_s_K1 +#define 
__pyx_n_s_K2 __pyx_mstate_global->__pyx_n_s_K2 +#define __pyx_n_s_MAX_INTERSECT_POINTS __pyx_mstate_global->__pyx_n_s_MAX_INTERSECT_POINTS +#define __pyx_n_s_MemoryError __pyx_mstate_global->__pyx_n_s_MemoryError +#define __pyx_kp_s_MemoryView_of_r_at_0x_x __pyx_mstate_global->__pyx_kp_s_MemoryView_of_r_at_0x_x +#define __pyx_kp_s_MemoryView_of_r_object __pyx_mstate_global->__pyx_kp_s_MemoryView_of_r_object +#define __pyx_n_b_O __pyx_mstate_global->__pyx_n_b_O +#define __pyx_kp_u_Out_of_bounds_on_buffer_access_a __pyx_mstate_global->__pyx_kp_u_Out_of_bounds_on_buffer_access_a +#define __pyx_n_s_PickleError __pyx_mstate_global->__pyx_n_s_PickleError +#define __pyx_n_s_Sequence __pyx_mstate_global->__pyx_n_s_Sequence +#define __pyx_kp_s_Step_may_not_be_zero_axis_d __pyx_mstate_global->__pyx_kp_s_Step_may_not_be_zero_axis_d +#define __pyx_n_s_TypeError __pyx_mstate_global->__pyx_n_s_TypeError +#define __pyx_kp_s_Unable_to_convert_item_to_object __pyx_mstate_global->__pyx_kp_s_Unable_to_convert_item_to_object +#define __pyx_n_s_ValueError __pyx_mstate_global->__pyx_n_s_ValueError +#define __pyx_n_s_View_MemoryView __pyx_mstate_global->__pyx_n_s_View_MemoryView +#define __pyx_kp_u__2 __pyx_mstate_global->__pyx_kp_u__2 +#define __pyx_n_s__3 __pyx_mstate_global->__pyx_n_s__3 +#define __pyx_n_s__30 __pyx_mstate_global->__pyx_n_s__30 +#define __pyx_kp_u__6 __pyx_mstate_global->__pyx_kp_u__6 +#define __pyx_kp_u__7 __pyx_mstate_global->__pyx_kp_u__7 +#define __pyx_n_s_abc __pyx_mstate_global->__pyx_n_s_abc +#define __pyx_n_s_abs __pyx_mstate_global->__pyx_n_s_abs +#define __pyx_n_s_allocate_buffer __pyx_mstate_global->__pyx_n_s_allocate_buffer +#define __pyx_kp_u_and __pyx_mstate_global->__pyx_kp_u_and +#define __pyx_n_s_approximate __pyx_mstate_global->__pyx_n_s_approximate +#define __pyx_n_s_array __pyx_mstate_global->__pyx_n_s_array +#define __pyx_n_s_astype __pyx_mstate_global->__pyx_n_s_astype +#define __pyx_n_s_asyncio_coroutines __pyx_mstate_global->__pyx_n_s_asyncio_coroutines +#define __pyx_n_s_b __pyx_mstate_global->__pyx_n_s_b +#define __pyx_n_s_base __pyx_mstate_global->__pyx_n_s_base +#define __pyx_n_s_box_intersection __pyx_mstate_global->__pyx_n_s_box_intersection +#define __pyx_kp_s_box_intersection_pyx __pyx_mstate_global->__pyx_kp_s_box_intersection_pyx +#define __pyx_n_s_c __pyx_mstate_global->__pyx_n_s_c +#define __pyx_n_u_c __pyx_mstate_global->__pyx_n_u_c +#define __pyx_n_s_cidx __pyx_mstate_global->__pyx_n_s_cidx +#define __pyx_n_s_class __pyx_mstate_global->__pyx_n_s_class +#define __pyx_n_s_class_getitem __pyx_mstate_global->__pyx_n_s_class_getitem +#define __pyx_n_s_clear __pyx_mstate_global->__pyx_n_s_clear +#define __pyx_n_s_cline_in_traceback __pyx_mstate_global->__pyx_n_s_cline_in_traceback +#define __pyx_n_s_clipPolygon __pyx_mstate_global->__pyx_n_s_clipPolygon +#define __pyx_n_s_clipVertex __pyx_mstate_global->__pyx_n_s_clipVertex +#define __pyx_n_s_collections __pyx_mstate_global->__pyx_n_s_collections +#define __pyx_kp_s_collections_abc __pyx_mstate_global->__pyx_kp_s_collections_abc +#define __pyx_n_s_computeIntersection __pyx_mstate_global->__pyx_n_s_computeIntersection +#define __pyx_kp_s_contiguous_and_direct __pyx_mstate_global->__pyx_kp_s_contiguous_and_direct +#define __pyx_kp_s_contiguous_and_indirect __pyx_mstate_global->__pyx_kp_s_contiguous_and_indirect +#define __pyx_n_s_copy __pyx_mstate_global->__pyx_n_s_copy +#define __pyx_n_s_count __pyx_mstate_global->__pyx_n_s_count +#define __pyx_n_s_cp1 __pyx_mstate_global->__pyx_n_s_cp1 +#define __pyx_n_s_cp2 
__pyx_mstate_global->__pyx_n_s_cp2 +#define __pyx_n_s_dc __pyx_mstate_global->__pyx_n_s_dc +#define __pyx_n_s_dict __pyx_mstate_global->__pyx_n_s_dict +#define __pyx_kp_u_disable __pyx_mstate_global->__pyx_kp_u_disable +#define __pyx_n_s_dot __pyx_mstate_global->__pyx_n_s_dot +#define __pyx_n_s_dp __pyx_mstate_global->__pyx_n_s_dp +#define __pyx_n_s_dtype __pyx_mstate_global->__pyx_n_s_dtype +#define __pyx_n_s_dtype_is_object __pyx_mstate_global->__pyx_n_s_dtype_is_object +#define __pyx_n_s_e __pyx_mstate_global->__pyx_n_s_e +#define __pyx_kp_u_enable __pyx_mstate_global->__pyx_kp_u_enable +#define __pyx_n_s_encode __pyx_mstate_global->__pyx_n_s_encode +#define __pyx_n_s_enumerate __pyx_mstate_global->__pyx_n_s_enumerate +#define __pyx_n_s_error __pyx_mstate_global->__pyx_n_s_error +#define __pyx_n_s_flags __pyx_mstate_global->__pyx_n_s_flags +#define __pyx_n_s_float32 __pyx_mstate_global->__pyx_n_s_float32 +#define __pyx_n_s_format __pyx_mstate_global->__pyx_n_s_format +#define __pyx_n_s_fortran __pyx_mstate_global->__pyx_n_s_fortran +#define __pyx_n_u_fortran __pyx_mstate_global->__pyx_n_u_fortran +#define __pyx_kp_u_gc __pyx_mstate_global->__pyx_kp_u_gc +#define __pyx_n_s_getstate __pyx_mstate_global->__pyx_n_s_getstate +#define __pyx_kp_u_got __pyx_mstate_global->__pyx_kp_u_got +#define __pyx_kp_u_got_differing_extents_in_dimensi __pyx_mstate_global->__pyx_kp_u_got_differing_extents_in_dimensi +#define __pyx_n_s_id __pyx_mstate_global->__pyx_n_s_id +#define __pyx_n_s_iidx __pyx_mstate_global->__pyx_n_s_iidx +#define __pyx_n_s_import __pyx_mstate_global->__pyx_n_s_import +#define __pyx_n_s_inc __pyx_mstate_global->__pyx_n_s_inc +#define __pyx_n_s_index __pyx_mstate_global->__pyx_n_s_index +#define __pyx_n_s_initializing __pyx_mstate_global->__pyx_n_s_initializing +#define __pyx_n_s_inputList __pyx_mstate_global->__pyx_n_s_inputList +#define __pyx_n_s_inputList_np __pyx_mstate_global->__pyx_n_s_inputList_np +#define __pyx_n_s_inter __pyx_mstate_global->__pyx_n_s_inter +#define __pyx_n_s_inter_areas __pyx_mstate_global->__pyx_n_s_inter_areas +#define __pyx_n_s_is_coroutine __pyx_mstate_global->__pyx_n_s_is_coroutine +#define __pyx_kp_u_isenabled __pyx_mstate_global->__pyx_kp_u_isenabled +#define __pyx_n_s_itemsize __pyx_mstate_global->__pyx_n_s_itemsize +#define __pyx_kp_s_itemsize_0_for_cython_array __pyx_mstate_global->__pyx_kp_s_itemsize_0_for_cython_array +#define __pyx_n_s_k1 __pyx_mstate_global->__pyx_n_s_k1 +#define __pyx_n_s_k2 __pyx_mstate_global->__pyx_n_s_k2 +#define __pyx_n_s_lenc __pyx_mstate_global->__pyx_n_s_lenc +#define __pyx_n_s_main __pyx_mstate_global->__pyx_n_s_main +#define __pyx_n_s_memview __pyx_mstate_global->__pyx_n_s_memview +#define __pyx_n_s_mode __pyx_mstate_global->__pyx_n_s_mode +#define __pyx_n_s_n1 __pyx_mstate_global->__pyx_n_s_n1 +#define __pyx_n_s_n2 __pyx_mstate_global->__pyx_n_s_n2 +#define __pyx_n_s_n3 __pyx_mstate_global->__pyx_n_s_n3 +#define __pyx_n_s_name __pyx_mstate_global->__pyx_n_s_name +#define __pyx_n_s_name_2 __pyx_mstate_global->__pyx_n_s_name_2 +#define __pyx_n_s_ndim __pyx_mstate_global->__pyx_n_s_ndim +#define __pyx_n_s_new __pyx_mstate_global->__pyx_n_s_new +#define __pyx_n_s_ninput_list __pyx_mstate_global->__pyx_n_s_ninput_list +#define __pyx_n_s_ninter __pyx_mstate_global->__pyx_n_s_ninter +#define __pyx_kp_s_no_default___reduce___due_to_non __pyx_mstate_global->__pyx_kp_s_no_default___reduce___due_to_non +#define __pyx_n_s_non_rot_inter_areas __pyx_mstate_global->__pyx_n_s_non_rot_inter_areas +#define __pyx_n_s_noutput_list 
__pyx_mstate_global->__pyx_n_s_noutput_list +#define __pyx_n_s_np __pyx_mstate_global->__pyx_n_s_np +#define __pyx_n_s_num_clip_points __pyx_mstate_global->__pyx_n_s_num_clip_points +#define __pyx_n_s_num_intersect_points __pyx_mstate_global->__pyx_n_s_num_intersect_points +#define __pyx_n_s_numpy __pyx_mstate_global->__pyx_n_s_numpy +#define __pyx_kp_s_numpy_core_multiarray_failed_to __pyx_mstate_global->__pyx_kp_s_numpy_core_multiarray_failed_to +#define __pyx_kp_s_numpy_core_umath_failed_to_impor __pyx_mstate_global->__pyx_kp_s_numpy_core_umath_failed_to_impor +#define __pyx_n_s_nums_k2 __pyx_mstate_global->__pyx_n_s_nums_k2 +#define __pyx_n_s_obj __pyx_mstate_global->__pyx_n_s_obj +#define __pyx_n_s_outputList __pyx_mstate_global->__pyx_n_s_outputList +#define __pyx_n_s_outputList_np __pyx_mstate_global->__pyx_n_s_outputList_np +#define __pyx_n_s_pack __pyx_mstate_global->__pyx_n_s_pack +#define __pyx_n_s_pickle __pyx_mstate_global->__pyx_n_s_pickle +#define __pyx_n_s_polygon_clip_float __pyx_mstate_global->__pyx_n_s_polygon_clip_float +#define __pyx_n_s_polygon_clip_unnest __pyx_mstate_global->__pyx_n_s_polygon_clip_unnest +#define __pyx_n_s_pyx_PickleError __pyx_mstate_global->__pyx_n_s_pyx_PickleError +#define __pyx_n_s_pyx_checksum __pyx_mstate_global->__pyx_n_s_pyx_checksum +#define __pyx_n_s_pyx_result __pyx_mstate_global->__pyx_n_s_pyx_result +#define __pyx_n_s_pyx_state __pyx_mstate_global->__pyx_n_s_pyx_state +#define __pyx_n_s_pyx_type __pyx_mstate_global->__pyx_n_s_pyx_type +#define __pyx_n_s_pyx_unpickle_Enum __pyx_mstate_global->__pyx_n_s_pyx_unpickle_Enum +#define __pyx_n_s_pyx_vtable __pyx_mstate_global->__pyx_n_s_pyx_vtable +#define __pyx_n_s_range __pyx_mstate_global->__pyx_n_s_range +#define __pyx_n_s_rect1 __pyx_mstate_global->__pyx_n_s_rect1 +#define __pyx_n_s_rect2 __pyx_mstate_global->__pyx_n_s_rect2 +#define __pyx_n_s_reduce __pyx_mstate_global->__pyx_n_s_reduce +#define __pyx_n_s_reduce_cython __pyx_mstate_global->__pyx_n_s_reduce_cython +#define __pyx_n_s_reduce_ex __pyx_mstate_global->__pyx_n_s_reduce_ex +#define __pyx_n_s_register __pyx_mstate_global->__pyx_n_s_register +#define __pyx_n_s_roll __pyx_mstate_global->__pyx_n_s_roll +#define __pyx_n_s_s __pyx_mstate_global->__pyx_n_s_s +#define __pyx_n_s_setstate __pyx_mstate_global->__pyx_n_s_setstate +#define __pyx_n_s_setstate_cython __pyx_mstate_global->__pyx_n_s_setstate_cython +#define __pyx_n_s_shape __pyx_mstate_global->__pyx_n_s_shape +#define __pyx_n_s_size __pyx_mstate_global->__pyx_n_s_size +#define __pyx_n_s_spec __pyx_mstate_global->__pyx_n_s_spec +#define __pyx_n_s_start __pyx_mstate_global->__pyx_n_s_start +#define __pyx_n_s_step __pyx_mstate_global->__pyx_n_s_step +#define __pyx_n_s_stop __pyx_mstate_global->__pyx_n_s_stop +#define __pyx_kp_s_strided_and_direct __pyx_mstate_global->__pyx_kp_s_strided_and_direct +#define __pyx_kp_s_strided_and_direct_or_indirect __pyx_mstate_global->__pyx_kp_s_strided_and_direct_or_indirect +#define __pyx_kp_s_strided_and_indirect __pyx_mstate_global->__pyx_kp_s_strided_and_indirect +#define __pyx_kp_s_stringsource __pyx_mstate_global->__pyx_kp_s_stringsource +#define __pyx_n_s_struct __pyx_mstate_global->__pyx_n_s_struct +#define __pyx_n_s_subjectPolygon __pyx_mstate_global->__pyx_n_s_subjectPolygon +#define __pyx_n_s_subjectVertex __pyx_mstate_global->__pyx_n_s_subjectVertex +#define __pyx_n_s_sys __pyx_mstate_global->__pyx_n_s_sys +#define __pyx_n_s_test __pyx_mstate_global->__pyx_n_s_test +#define __pyx_kp_s_unable_to_allocate_array_data 
__pyx_mstate_global->__pyx_kp_s_unable_to_allocate_array_data +#define __pyx_kp_s_unable_to_allocate_shape_and_str __pyx_mstate_global->__pyx_kp_s_unable_to_allocate_shape_and_str +#define __pyx_n_s_unpack __pyx_mstate_global->__pyx_n_s_unpack +#define __pyx_n_s_update __pyx_mstate_global->__pyx_n_s_update +#define __pyx_n_s_version_info __pyx_mstate_global->__pyx_n_s_version_info +#define __pyx_n_s_x __pyx_mstate_global->__pyx_n_s_x +#define __pyx_n_s_xs __pyx_mstate_global->__pyx_n_s_xs +#define __pyx_n_s_ys __pyx_mstate_global->__pyx_n_s_ys +#define __pyx_n_s_zeros __pyx_mstate_global->__pyx_n_s_zeros +#define __pyx_float_0_5 __pyx_mstate_global->__pyx_float_0_5 +#define __pyx_float_1_0 __pyx_mstate_global->__pyx_float_1_0 +#define __pyx_int_0 __pyx_mstate_global->__pyx_int_0 +#define __pyx_int_1 __pyx_mstate_global->__pyx_int_1 +#define __pyx_int_2 __pyx_mstate_global->__pyx_int_2 +#define __pyx_int_3 __pyx_mstate_global->__pyx_int_3 +#define __pyx_int_112105877 __pyx_mstate_global->__pyx_int_112105877 +#define __pyx_int_136983863 __pyx_mstate_global->__pyx_int_136983863 +#define __pyx_int_184977713 __pyx_mstate_global->__pyx_int_184977713 +#define __pyx_int_neg_1 __pyx_mstate_global->__pyx_int_neg_1 +#define __pyx_slice__5 __pyx_mstate_global->__pyx_slice__5 +#define __pyx_tuple__4 __pyx_mstate_global->__pyx_tuple__4 +#define __pyx_tuple__8 __pyx_mstate_global->__pyx_tuple__8 +#define __pyx_tuple__9 __pyx_mstate_global->__pyx_tuple__9 +#define __pyx_tuple__10 __pyx_mstate_global->__pyx_tuple__10 +#define __pyx_tuple__11 __pyx_mstate_global->__pyx_tuple__11 +#define __pyx_tuple__12 __pyx_mstate_global->__pyx_tuple__12 +#define __pyx_tuple__13 __pyx_mstate_global->__pyx_tuple__13 +#define __pyx_tuple__14 __pyx_mstate_global->__pyx_tuple__14 +#define __pyx_tuple__15 __pyx_mstate_global->__pyx_tuple__15 +#define __pyx_tuple__16 __pyx_mstate_global->__pyx_tuple__16 +#define __pyx_tuple__17 __pyx_mstate_global->__pyx_tuple__17 +#define __pyx_tuple__18 __pyx_mstate_global->__pyx_tuple__18 +#define __pyx_tuple__19 __pyx_mstate_global->__pyx_tuple__19 +#define __pyx_tuple__20 __pyx_mstate_global->__pyx_tuple__20 +#define __pyx_tuple__22 __pyx_mstate_global->__pyx_tuple__22 +#define __pyx_tuple__24 __pyx_mstate_global->__pyx_tuple__24 +#define __pyx_tuple__26 __pyx_mstate_global->__pyx_tuple__26 +#define __pyx_tuple__28 __pyx_mstate_global->__pyx_tuple__28 +#define __pyx_codeobj__21 __pyx_mstate_global->__pyx_codeobj__21 +#define __pyx_codeobj__23 __pyx_mstate_global->__pyx_codeobj__23 +#define __pyx_codeobj__25 __pyx_mstate_global->__pyx_codeobj__25 +#define __pyx_codeobj__27 __pyx_mstate_global->__pyx_codeobj__27 +#define __pyx_codeobj__29 __pyx_mstate_global->__pyx_codeobj__29 +/* #### Code section: module_code ### */ + +/* "View.MemoryView":131 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + +/* Python wrapper */ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_array___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_shape = 0; + Py_ssize_t __pyx_v_itemsize; + PyObject *__pyx_v_format = 0; + PyObject *__pyx_v_mode = 0; + int __pyx_v_allocate_buffer; + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[5] = {0,0,0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = 
NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 131, __pyx_L3_error) + #endif + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_shape,&__pyx_n_s_itemsize,&__pyx_n_s_format,&__pyx_n_s_mode,&__pyx_n_s_allocate_buffer,0}; + values[3] = __Pyx_Arg_NewRef_VARARGS(((PyObject *)__pyx_n_s_c)); + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 5: values[4] = __Pyx_Arg_VARARGS(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = __Pyx_Arg_VARARGS(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_shape)) != 0)) { + (void)__Pyx_Arg_NewRef_VARARGS(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_itemsize)) != 0)) { + (void)__Pyx_Arg_NewRef_VARARGS(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 1); __PYX_ERR(1, 131, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_format)) != 0)) { + (void)__Pyx_Arg_NewRef_VARARGS(values[2]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, 2); __PYX_ERR(1, 131, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (kw_args > 0) { + PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_mode); + if (value) { values[3] = __Pyx_Arg_NewRef_VARARGS(value); kw_args--; } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (kw_args > 0) { + PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_allocate_buffer); + if (value) { values[4] = __Pyx_Arg_NewRef_VARARGS(value); kw_args--; } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__cinit__") < 0)) __PYX_ERR(1, 131, __pyx_L3_error) + } + } else { + switch (__pyx_nargs) { + case 5: values[4] = __Pyx_Arg_VARARGS(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = __Pyx_Arg_VARARGS(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2); + values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1); + values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_shape = ((PyObject*)values[0]); + 
__pyx_v_itemsize = __Pyx_PyIndex_AsSsize_t(values[1]); if (unlikely((__pyx_v_itemsize == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 131, __pyx_L3_error) + __pyx_v_format = values[2]; + __pyx_v_mode = values[3]; + if (values[4]) { + __pyx_v_allocate_buffer = __Pyx_PyObject_IsTrue(values[4]); if (unlikely((__pyx_v_allocate_buffer == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 132, __pyx_L3_error) + } else { + + /* "View.MemoryView":132 + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, + * mode="c", bint allocate_buffer=True): # <<<<<<<<<<<<<< + * + * cdef int idx + */ + __pyx_v_allocate_buffer = ((int)1); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 3, 5, __pyx_nargs); __PYX_ERR(1, 131, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_VARARGS(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + if (unlikely(!__Pyx_ArgTypeTest(((PyObject *)__pyx_v_shape), (&PyTuple_Type), 1, "shape", 1))) __PYX_ERR(1, 131, __pyx_L1_error) + if (unlikely(((PyObject *)__pyx_v_format) == Py_None)) { + PyErr_Format(PyExc_TypeError, "Argument '%.200s' must not be None", "format"); __PYX_ERR(1, 131, __pyx_L1_error) + } + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v_shape, __pyx_v_itemsize, __pyx_v_format, __pyx_v_mode, __pyx_v_allocate_buffer); + + /* "View.MemoryView":131 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + goto __pyx_L0; + __pyx_L1_error:; + __pyx_r = -1; + __pyx_L0:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_VARARGS(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array___cinit__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, PyObject *__pyx_v_format, PyObject *__pyx_v_mode, int __pyx_v_allocate_buffer) { + int __pyx_v_idx; + Py_ssize_t __pyx_v_dim; + char __pyx_v_order; + int __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + char *__pyx_t_8; + Py_ssize_t __pyx_t_9; + Py_UCS4 __pyx_t_10; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + __Pyx_INCREF(__pyx_v_format); + + /* "View.MemoryView":137 + * cdef Py_ssize_t dim + * + * self.ndim = len(shape) # <<<<<<<<<<<<<< + * self.itemsize = itemsize + * + */ + if (unlikely(__pyx_v_shape == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 137, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_PyTuple_GET_SIZE(__pyx_v_shape); if (unlikely(__pyx_t_1 == ((Py_ssize_t)-1))) __PYX_ERR(1, 137, __pyx_L1_error) + __pyx_v_self->ndim = ((int)__pyx_t_1); + + /* "View.MemoryView":138 + 
* + * self.ndim = len(shape) + * self.itemsize = itemsize # <<<<<<<<<<<<<< + * + * if not self.ndim: + */ + __pyx_v_self->itemsize = __pyx_v_itemsize; + + /* "View.MemoryView":140 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError, "Empty shape tuple for cython.array" + * + */ + __pyx_t_2 = (!(__pyx_v_self->ndim != 0)); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":141 + * + * if not self.ndim: + * raise ValueError, "Empty shape tuple for cython.array" # <<<<<<<<<<<<<< + * + * if itemsize <= 0: + */ + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Empty_shape_tuple_for_cython_arr, 0, 0); + __PYX_ERR(1, 141, __pyx_L1_error) + + /* "View.MemoryView":140 + * self.itemsize = itemsize + * + * if not self.ndim: # <<<<<<<<<<<<<< + * raise ValueError, "Empty shape tuple for cython.array" + * + */ + } + + /* "View.MemoryView":143 + * raise ValueError, "Empty shape tuple for cython.array" + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError, "itemsize <= 0 for cython.array" + * + */ + __pyx_t_2 = (__pyx_v_itemsize <= 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":144 + * + * if itemsize <= 0: + * raise ValueError, "itemsize <= 0 for cython.array" # <<<<<<<<<<<<<< + * + * if not isinstance(format, bytes): + */ + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_itemsize_0_for_cython_array, 0, 0); + __PYX_ERR(1, 144, __pyx_L1_error) + + /* "View.MemoryView":143 + * raise ValueError, "Empty shape tuple for cython.array" + * + * if itemsize <= 0: # <<<<<<<<<<<<<< + * raise ValueError, "itemsize <= 0 for cython.array" + * + */ + } + + /* "View.MemoryView":146 + * raise ValueError, "itemsize <= 0 for cython.array" + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + __pyx_t_2 = PyBytes_Check(__pyx_v_format); + __pyx_t_3 = (!__pyx_t_2); + if (__pyx_t_3) { + + /* "View.MemoryView":147 + * + * if not isinstance(format, bytes): + * format = format.encode('ASCII') # <<<<<<<<<<<<<< + * self._format = format # keep a reference to the byte string + * self.format = self._format + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_format, __pyx_n_s_encode); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = NULL; + __pyx_t_7 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_6 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_6)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_6); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + __pyx_t_7 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_6, __pyx_n_s_ASCII}; + __pyx_t_4 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_7, 1+__pyx_t_7); + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 147, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __Pyx_DECREF_SET(__pyx_v_format, __pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":146 + * raise ValueError, "itemsize <= 0 for cython.array" + * + * if not isinstance(format, bytes): # <<<<<<<<<<<<<< + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + */ + } + + /* "View.MemoryView":148 + * if not isinstance(format, bytes): + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string # <<<<<<<<<<<<<< + * 
self.format = self._format + * + */ + if (!(likely(PyBytes_CheckExact(__pyx_v_format))||((__pyx_v_format) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_v_format))) __PYX_ERR(1, 148, __pyx_L1_error) + __pyx_t_4 = __pyx_v_format; + __Pyx_INCREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_4); + __Pyx_GOTREF(__pyx_v_self->_format); + __Pyx_DECREF(__pyx_v_self->_format); + __pyx_v_self->_format = ((PyObject*)__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":149 + * format = format.encode('ASCII') + * self._format = format # keep a reference to the byte string + * self.format = self._format # <<<<<<<<<<<<<< + * + * + */ + if (unlikely(__pyx_v_self->_format == Py_None)) { + PyErr_SetString(PyExc_TypeError, "expected bytes, NoneType found"); + __PYX_ERR(1, 149, __pyx_L1_error) + } + __pyx_t_8 = __Pyx_PyBytes_AsWritableString(__pyx_v_self->_format); if (unlikely((!__pyx_t_8) && PyErr_Occurred())) __PYX_ERR(1, 149, __pyx_L1_error) + __pyx_v_self->format = __pyx_t_8; + + /* "View.MemoryView":152 + * + * + * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) # <<<<<<<<<<<<<< + * self._strides = self._shape + self.ndim + * + */ + __pyx_v_self->_shape = ((Py_ssize_t *)PyObject_Malloc((((sizeof(Py_ssize_t)) * __pyx_v_self->ndim) * 2))); + + /* "View.MemoryView":153 + * + * self._shape = PyObject_Malloc(sizeof(Py_ssize_t)*self.ndim*2) + * self._strides = self._shape + self.ndim # <<<<<<<<<<<<<< + * + * if not self._shape: + */ + __pyx_v_self->_strides = (__pyx_v_self->_shape + __pyx_v_self->ndim); + + /* "View.MemoryView":155 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError, "unable to allocate shape and strides." + * + */ + __pyx_t_3 = (!(__pyx_v_self->_shape != 0)); + if (unlikely(__pyx_t_3)) { + + /* "View.MemoryView":156 + * + * if not self._shape: + * raise MemoryError, "unable to allocate shape and strides." # <<<<<<<<<<<<<< + * + * + */ + __Pyx_Raise(__pyx_builtin_MemoryError, __pyx_kp_s_unable_to_allocate_shape_and_str, 0, 0); + __PYX_ERR(1, 156, __pyx_L1_error) + + /* "View.MemoryView":155 + * self._strides = self._shape + self.ndim + * + * if not self._shape: # <<<<<<<<<<<<<< + * raise MemoryError, "unable to allocate shape and strides." + * + */ + } + + /* "View.MemoryView":159 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError, f"Invalid shape in axis {idx}: {dim}." + */ + __pyx_t_7 = 0; + __pyx_t_4 = __pyx_v_shape; __Pyx_INCREF(__pyx_t_4); __pyx_t_1 = 0; + for (;;) { + if (__pyx_t_1 >= PyTuple_GET_SIZE(__pyx_t_4)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_4, __pyx_t_1); __Pyx_INCREF(__pyx_t_5); __pyx_t_1++; if (unlikely((0 < 0))) __PYX_ERR(1, 159, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_4, __pyx_t_1); __pyx_t_1++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 159, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_5); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 159, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_9; + __pyx_v_idx = __pyx_t_7; + __pyx_t_7 = (__pyx_t_7 + 1); + + /* "View.MemoryView":160 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError, f"Invalid shape in axis {idx}: {dim}." 
+ * self._shape[idx] = dim + */ + __pyx_t_3 = (__pyx_v_dim <= 0); + if (unlikely(__pyx_t_3)) { + + /* "View.MemoryView":161 + * for idx, dim in enumerate(shape): + * if dim <= 0: + * raise ValueError, f"Invalid shape in axis {idx}: {dim}." # <<<<<<<<<<<<<< + * self._shape[idx] = dim + * + */ + __pyx_t_5 = PyTuple_New(5); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 161, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_9 = 0; + __pyx_t_10 = 127; + __Pyx_INCREF(__pyx_kp_u_Invalid_shape_in_axis); + __pyx_t_9 += 22; + __Pyx_GIVEREF(__pyx_kp_u_Invalid_shape_in_axis); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_Invalid_shape_in_axis); + __pyx_t_6 = __Pyx_PyUnicode_From_int(__pyx_v_idx, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_6); + __pyx_t_6 = 0; + __Pyx_INCREF(__pyx_kp_u_); + __pyx_t_9 += 2; + __Pyx_GIVEREF(__pyx_kp_u_); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u_); + __pyx_t_6 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_9 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_6); + PyTuple_SET_ITEM(__pyx_t_5, 3, __pyx_t_6); + __pyx_t_6 = 0; + __Pyx_INCREF(__pyx_kp_u__2); + __pyx_t_9 += 1; + __Pyx_GIVEREF(__pyx_kp_u__2); + PyTuple_SET_ITEM(__pyx_t_5, 4, __pyx_kp_u__2); + __pyx_t_6 = __Pyx_PyUnicode_Join(__pyx_t_5, 5, __pyx_t_9, __pyx_t_10); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 161, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_6, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 161, __pyx_L1_error) + + /* "View.MemoryView":160 + * + * for idx, dim in enumerate(shape): + * if dim <= 0: # <<<<<<<<<<<<<< + * raise ValueError, f"Invalid shape in axis {idx}: {dim}." + * self._shape[idx] = dim + */ + } + + /* "View.MemoryView":162 + * if dim <= 0: + * raise ValueError, f"Invalid shape in axis {idx}: {dim}." + * self._shape[idx] = dim # <<<<<<<<<<<<<< + * + * cdef char order + */ + (__pyx_v_self->_shape[__pyx_v_idx]) = __pyx_v_dim; + + /* "View.MemoryView":159 + * + * + * for idx, dim in enumerate(shape): # <<<<<<<<<<<<<< + * if dim <= 0: + * raise ValueError, f"Invalid shape in axis {idx}: {dim}." 
+ */ + } + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "View.MemoryView":165 + * + * cdef char order + * if mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_c, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 165, __pyx_L1_error) + if (__pyx_t_3) { + + /* "View.MemoryView":166 + * cdef char order + * if mode == 'c': + * order = b'C' # <<<<<<<<<<<<<< + * self.mode = u'c' + * elif mode == 'fortran': + */ + __pyx_v_order = 'C'; + + /* "View.MemoryView":167 + * if mode == 'c': + * order = b'C' + * self.mode = u'c' # <<<<<<<<<<<<<< + * elif mode == 'fortran': + * order = b'F' + */ + __Pyx_INCREF(__pyx_n_u_c); + __Pyx_GIVEREF(__pyx_n_u_c); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_c; + + /* "View.MemoryView":165 + * + * cdef char order + * if mode == 'c': # <<<<<<<<<<<<<< + * order = b'C' + * self.mode = u'c' + */ + goto __pyx_L11; + } + + /* "View.MemoryView":168 + * order = b'C' + * self.mode = u'c' + * elif mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + __pyx_t_3 = (__Pyx_PyString_Equals(__pyx_v_mode, __pyx_n_s_fortran, Py_EQ)); if (unlikely((__pyx_t_3 < 0))) __PYX_ERR(1, 168, __pyx_L1_error) + if (likely(__pyx_t_3)) { + + /* "View.MemoryView":169 + * self.mode = u'c' + * elif mode == 'fortran': + * order = b'F' # <<<<<<<<<<<<<< + * self.mode = u'fortran' + * else: + */ + __pyx_v_order = 'F'; + + /* "View.MemoryView":170 + * elif mode == 'fortran': + * order = b'F' + * self.mode = u'fortran' # <<<<<<<<<<<<<< + * else: + * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" + */ + __Pyx_INCREF(__pyx_n_u_fortran); + __Pyx_GIVEREF(__pyx_n_u_fortran); + __Pyx_GOTREF(__pyx_v_self->mode); + __Pyx_DECREF(__pyx_v_self->mode); + __pyx_v_self->mode = __pyx_n_u_fortran; + + /* "View.MemoryView":168 + * order = b'C' + * self.mode = u'c' + * elif mode == 'fortran': # <<<<<<<<<<<<<< + * order = b'F' + * self.mode = u'fortran' + */ + goto __pyx_L11; + } + + /* "View.MemoryView":172 + * self.mode = u'fortran' + * else: + * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" # <<<<<<<<<<<<<< + * + * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) + */ + /*else*/ { + __pyx_t_4 = __Pyx_PyObject_FormatSimple(__pyx_v_mode, __pyx_empty_unicode); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 172, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_6 = __Pyx_PyUnicode_Concat(__pyx_kp_u_Invalid_mode_expected_c_or_fortr, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 172, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_6, 0, 0); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __PYX_ERR(1, 172, __pyx_L1_error) + } + __pyx_L11:; + + /* "View.MemoryView":174 + * raise ValueError, f"Invalid mode, expected 'c' or 'fortran', got {mode}" + * + * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) # <<<<<<<<<<<<<< + * + * self.free_data = allocate_buffer + */ + __pyx_v_self->len = __pyx_fill_contig_strides_array(__pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_itemsize, __pyx_v_self->ndim, __pyx_v_order); + + /* "View.MemoryView":176 + * self.len = fill_contig_strides_array(self._shape, self._strides, itemsize, self.ndim, order) + * + * self.free_data = allocate_buffer # <<<<<<<<<<<<<< + * self.dtype_is_object = format == b'O' 
+ * + */ + __pyx_v_self->free_data = __pyx_v_allocate_buffer; + + /* "View.MemoryView":177 + * + * self.free_data = allocate_buffer + * self.dtype_is_object = format == b'O' # <<<<<<<<<<<<<< + * + * if allocate_buffer: + */ + __pyx_t_6 = PyObject_RichCompare(__pyx_v_format, __pyx_n_b_O, Py_EQ); __Pyx_XGOTREF(__pyx_t_6); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 177, __pyx_L1_error) + __pyx_t_3 = __Pyx_PyObject_IsTrue(__pyx_t_6); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 177, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_v_self->dtype_is_object = __pyx_t_3; + + /* "View.MemoryView":179 + * self.dtype_is_object = format == b'O' + * + * if allocate_buffer: # <<<<<<<<<<<<<< + * _allocate_buffer(self) + * + */ + if (__pyx_v_allocate_buffer) { + + /* "View.MemoryView":180 + * + * if allocate_buffer: + * _allocate_buffer(self) # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + __pyx_t_7 = __pyx_array_allocate_buffer(__pyx_v_self); if (unlikely(__pyx_t_7 == ((int)-1))) __PYX_ERR(1, 180, __pyx_L1_error) + + /* "View.MemoryView":179 + * self.dtype_is_object = format == b'O' + * + * if allocate_buffer: # <<<<<<<<<<<<<< + * _allocate_buffer(self) + * + */ + } + + /* "View.MemoryView":131 + * cdef bint dtype_is_object + * + * def __cinit__(array self, tuple shape, Py_ssize_t itemsize, format not None, # <<<<<<<<<<<<<< + * mode="c", bint allocate_buffer=True): + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.array.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_format); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":182 + * _allocate_buffer(self) + * + * @cname('getbuffer') # <<<<<<<<<<<<<< + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + */ + +/* Python wrapper */ +CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +CYTHON_UNUSED static int __pyx_array_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(((struct __pyx_array_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_2__getbuffer__(struct __pyx_array_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_v_bufmode; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + char *__pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + Py_ssize_t *__pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (unlikely(__pyx_v_info == NULL)) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":184 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int 
flags): + * cdef int bufmode = -1 # <<<<<<<<<<<<<< + * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): + * if self.mode == u"c": + */ + __pyx_v_bufmode = -1; + + /* "View.MemoryView":185 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): # <<<<<<<<<<<<<< + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_t_1 = ((__pyx_v_flags & ((PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS) | PyBUF_ANY_CONTIGUOUS)) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":186 + * cdef int bufmode = -1 + * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_c, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 186, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":187 + * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + __pyx_v_bufmode = (PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":186 + * cdef int bufmode = -1 + * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): + * if self.mode == u"c": # <<<<<<<<<<<<<< + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + */ + goto __pyx_L4; + } + + /* "View.MemoryView":188 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + __pyx_t_1 = (__Pyx_PyUnicode_Equals(__pyx_v_self->mode, __pyx_n_u_fortran, Py_EQ)); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 188, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":189 + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS # <<<<<<<<<<<<<< + * if not (flags & bufmode): + * raise ValueError, "Can only create a buffer that is contiguous in memory." + */ + __pyx_v_bufmode = (PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS); + + /* "View.MemoryView":188 + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * elif self.mode == u"fortran": # <<<<<<<<<<<<<< + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + */ + } + __pyx_L4:; + + /* "View.MemoryView":190 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError, "Can only create a buffer that is contiguous in memory." + * info.buf = self.data + */ + __pyx_t_1 = (!((__pyx_v_flags & __pyx_v_bufmode) != 0)); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":191 + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): + * raise ValueError, "Can only create a buffer that is contiguous in memory." 
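+ *
+ * Context: this is the PEP 3118 buffer interface of the array class.
+ * Consumers that demand contiguity (PyBUF_C_CONTIGUOUS, PyBUF_F_CONTIGUOUS
+ * or PyBUF_ANY_CONTIGUOUS) only succeed when the request matches
+ * self.mode; a mismatch raises the ValueError quoted above. A plain
+ * memoryview(arr) requests a strided buffer, so it is always accepted.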
# <<<<<<<<<<<<<< + * info.buf = self.data + * info.len = self.len + */ + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Can_only_create_a_buffer_that_is, 0, 0); + __PYX_ERR(1, 191, __pyx_L1_error) + + /* "View.MemoryView":190 + * elif self.mode == u"fortran": + * bufmode = PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + * if not (flags & bufmode): # <<<<<<<<<<<<<< + * raise ValueError, "Can only create a buffer that is contiguous in memory." + * info.buf = self.data + */ + } + + /* "View.MemoryView":185 + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + * if flags & (PyBUF_C_CONTIGUOUS | PyBUF_F_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS): # <<<<<<<<<<<<<< + * if self.mode == u"c": + * bufmode = PyBUF_C_CONTIGUOUS | PyBUF_ANY_CONTIGUOUS + */ + } + + /* "View.MemoryView":192 + * if not (flags & bufmode): + * raise ValueError, "Can only create a buffer that is contiguous in memory." + * info.buf = self.data # <<<<<<<<<<<<<< + * info.len = self.len + * + */ + __pyx_t_2 = __pyx_v_self->data; + __pyx_v_info->buf = __pyx_t_2; + + /* "View.MemoryView":193 + * raise ValueError, "Can only create a buffer that is contiguous in memory." + * info.buf = self.data + * info.len = self.len # <<<<<<<<<<<<<< + * + * if flags & PyBUF_STRIDES: + */ + __pyx_t_3 = __pyx_v_self->len; + __pyx_v_info->len = __pyx_t_3; + + /* "View.MemoryView":195 + * info.len = self.len + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.ndim = self.ndim + * info.shape = self._shape + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":196 + * + * if flags & PyBUF_STRIDES: + * info.ndim = self.ndim # <<<<<<<<<<<<<< + * info.shape = self._shape + * info.strides = self._strides + */ + __pyx_t_4 = __pyx_v_self->ndim; + __pyx_v_info->ndim = __pyx_t_4; + + /* "View.MemoryView":197 + * if flags & PyBUF_STRIDES: + * info.ndim = self.ndim + * info.shape = self._shape # <<<<<<<<<<<<<< + * info.strides = self._strides + * else: + */ + __pyx_t_5 = __pyx_v_self->_shape; + __pyx_v_info->shape = __pyx_t_5; + + /* "View.MemoryView":198 + * info.ndim = self.ndim + * info.shape = self._shape + * info.strides = self._strides # <<<<<<<<<<<<<< + * else: + * info.ndim = 1 + */ + __pyx_t_5 = __pyx_v_self->_strides; + __pyx_v_info->strides = __pyx_t_5; + + /* "View.MemoryView":195 + * info.len = self.len + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.ndim = self.ndim + * info.shape = self._shape + */ + goto __pyx_L6; + } + + /* "View.MemoryView":200 + * info.strides = self._strides + * else: + * info.ndim = 1 # <<<<<<<<<<<<<< + * info.shape = &self.len if flags & PyBUF_ND else NULL + * info.strides = NULL + */ + /*else*/ { + __pyx_v_info->ndim = 1; + + /* "View.MemoryView":201 + * else: + * info.ndim = 1 + * info.shape = &self.len if flags & PyBUF_ND else NULL # <<<<<<<<<<<<<< + * info.strides = NULL + * + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); + if (__pyx_t_1) { + __pyx_t_5 = (&__pyx_v_self->len); + } else { + __pyx_t_5 = NULL; + } + __pyx_v_info->shape = __pyx_t_5; + + /* "View.MemoryView":202 + * info.ndim = 1 + * info.shape = &self.len if flags & PyBUF_ND else NULL + * info.strides = NULL # <<<<<<<<<<<<<< + * + * info.suboffsets = NULL + */ + __pyx_v_info->strides = NULL; + } + __pyx_L6:; + + /* "View.MemoryView":204 + * info.strides = NULL + * + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * info.itemsize = self.itemsize + * info.readonly = 0 + */ + __pyx_v_info->suboffsets = NULL; + + /* "View.MemoryView":205 + * + * info.suboffsets = NULL + * 
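+ *
+ * Remaining Py_buffer fields filled below: the export is writable
+ * (readonly = 0), `format` is supplied only when the consumer passed
+ * PyBUF_FORMAT, and `obj` keeps the array alive for the buffer's
+ * lifetime.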
info.itemsize = self.itemsize # <<<<<<<<<<<<<< + * info.readonly = 0 + * info.format = self.format if flags & PyBUF_FORMAT else NULL + */ + __pyx_t_3 = __pyx_v_self->itemsize; + __pyx_v_info->itemsize = __pyx_t_3; + + /* "View.MemoryView":206 + * info.suboffsets = NULL + * info.itemsize = self.itemsize + * info.readonly = 0 # <<<<<<<<<<<<<< + * info.format = self.format if flags & PyBUF_FORMAT else NULL + * info.obj = self + */ + __pyx_v_info->readonly = 0; + + /* "View.MemoryView":207 + * info.itemsize = self.itemsize + * info.readonly = 0 + * info.format = self.format if flags & PyBUF_FORMAT else NULL # <<<<<<<<<<<<<< + * info.obj = self + * + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + __pyx_t_2 = __pyx_v_self->format; + } else { + __pyx_t_2 = NULL; + } + __pyx_v_info->format = __pyx_t_2; + + /* "View.MemoryView":208 + * info.readonly = 0 + * info.format = self.format if flags & PyBUF_FORMAT else NULL + * info.obj = self # <<<<<<<<<<<<<< + * + * def __dealloc__(array self): + */ + __Pyx_INCREF((PyObject *)__pyx_v_self); + __Pyx_GIVEREF((PyObject *)__pyx_v_self); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":182 + * _allocate_buffer(self) + * + * @cname('getbuffer') # <<<<<<<<<<<<<< + * def __getbuffer__(self, Py_buffer *info, int flags): + * cdef int bufmode = -1 + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.array.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":210 + * info.obj = self + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + +/* Python wrapper */ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_array___dealloc__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_array___pyx_pf_15View_dot_MemoryView_5array_4__dealloc__(struct __pyx_array_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":211 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data and self.data is not NULL: + */ + __pyx_t_1 = (__pyx_v_self->callback_free_data != NULL); + if (__pyx_t_1) { + + /* "View.MemoryView":212 + * def __dealloc__(array self): + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) # <<<<<<<<<<<<<< + * elif self.free_data and self.data is not NULL: + * if self.dtype_is_object: + */ + __pyx_v_self->callback_free_data(__pyx_v_self->data); + + /* 
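+ * __dealloc__ ordering in the quoted source below: an externally
+ * installed callback_free_data takes precedence; otherwise, when the
+ * object owns its buffer (free_data), object slots are DECREF'ed first
+ * (object dtype only) and the buffer is free()'d; the shape/strides
+ * block is always released with PyObject_Free.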
"View.MemoryView":211 + * + * def __dealloc__(array self): + * if self.callback_free_data != NULL: # <<<<<<<<<<<<<< + * self.callback_free_data(self.data) + * elif self.free_data and self.data is not NULL: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":213 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data and self.data is not NULL: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) + */ + if (__pyx_v_self->free_data) { + } else { + __pyx_t_1 = __pyx_v_self->free_data; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = (__pyx_v_self->data != NULL); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "View.MemoryView":214 + * self.callback_free_data(self.data) + * elif self.free_data and self.data is not NULL: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) + * free(self.data) + */ + if (__pyx_v_self->dtype_is_object) { + + /* "View.MemoryView":215 + * elif self.free_data and self.data is not NULL: + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) # <<<<<<<<<<<<<< + * free(self.data) + * PyObject_Free(self._shape) + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_self->data, __pyx_v_self->_shape, __pyx_v_self->_strides, __pyx_v_self->ndim, 0); + + /* "View.MemoryView":214 + * self.callback_free_data(self.data) + * elif self.free_data and self.data is not NULL: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) + * free(self.data) + */ + } + + /* "View.MemoryView":216 + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) + * free(self.data) # <<<<<<<<<<<<<< + * PyObject_Free(self._shape) + * + */ + free(__pyx_v_self->data); + + /* "View.MemoryView":213 + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + * elif self.free_data and self.data is not NULL: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) + */ + } + __pyx_L3:; + + /* "View.MemoryView":217 + * refcount_objects_in_slice(self.data, self._shape, self._strides, self.ndim, inc=False) + * free(self.data) + * PyObject_Free(self._shape) # <<<<<<<<<<<<<< + * + * @property + */ + PyObject_Free(__pyx_v_self->_shape); + + /* "View.MemoryView":210 + * info.obj = self + * + * def __dealloc__(array self): # <<<<<<<<<<<<<< + * if self.callback_free_data != NULL: + * self.callback_free_data(self.data) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":219 + * PyObject_Free(self._shape) + * + * @property # <<<<<<<<<<<<<< + * def memview(self): + * return self.get_memview() + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_5array_7memview___get__(((struct 
__pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_5array_7memview___get__(struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":221 + * @property + * def memview(self): + * return self.get_memview() # <<<<<<<<<<<<<< + * + * @cname('get_memview') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_array *)__pyx_v_self->__pyx_vtab)->get_memview(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 221, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":219 + * PyObject_Free(self._shape) + * + * @property # <<<<<<<<<<<<<< + * def memview(self): + * return self.get_memview() + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.memview.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":224 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + +static PyObject *__pyx_array_get_memview(struct __pyx_array_obj *__pyx_v_self) { + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_memview", 0); + + /* "View.MemoryView":225 + * @cname('get_memview') + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE # <<<<<<<<<<<<<< + * return memoryview(self, flags, self.dtype_is_object) + * + */ + __pyx_v_flags = ((PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT) | PyBUF_WRITABLE); + + /* "View.MemoryView":226 + * cdef get_memview(self): + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * def __len__(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF((PyObject *)__pyx_v_self); + __Pyx_GIVEREF((PyObject *)__pyx_v_self); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, ((PyObject *)__pyx_v_self))) __PYX_ERR(1, 226, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1)) __PYX_ERR(1, 226, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 226, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + 
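+ /* The `memview` property materializes a full memoryview over the array
+  * via get_memview(), requesting PyBUF_ANY_CONTIGUOUS | PyBUF_FORMAT |
+  * PyBUF_WRITABLE. Usage sketch, reusing the hypothetical `arr` from the
+  * constructor sketch earlier:
+  *
+  *     mv = arr.memview
+  *     mv[0, 0] = 1.0
+  */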
__Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":224 + * + * @cname('get_memview') + * cdef get_memview(self): # <<<<<<<<<<<<<< + * flags = PyBUF_ANY_CONTIGUOUS|PyBUF_FORMAT|PyBUF_WRITABLE + * return memoryview(self, flags, self.dtype_is_object) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.array.get_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":228 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + +/* Python wrapper */ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self); /*proto*/ +static Py_ssize_t __pyx_array___len__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static Py_ssize_t __pyx_array___pyx_pf_15View_dot_MemoryView_5array_6__len__(struct __pyx_array_obj *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__", 0); + + /* "View.MemoryView":229 + * + * def __len__(self): + * return self._shape[0] # <<<<<<<<<<<<<< + * + * def __getattr__(self, attr): + */ + __pyx_r = (__pyx_v_self->_shape[0]); + goto __pyx_L0; + + /* "View.MemoryView":228 + * return memoryview(self, flags, self.dtype_is_object) + * + * def __len__(self): # <<<<<<<<<<<<<< + * return self._shape[0] + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":231 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr); /*proto*/ +static PyObject *__pyx_array___getattr__(PyObject *__pyx_v_self, PyObject *__pyx_v_attr) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getattr__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_attr)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_8__getattr__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_attr) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getattr__", 0); + + /* "View.MemoryView":232 + * + * def __getattr__(self, attr): + * return getattr(self.memview, attr) # <<<<<<<<<<<<<< + * + * def __getitem__(self, item): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = 
__Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 232, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetAttr(__pyx_t_1, __pyx_v_attr); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 232, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":231 + * return self._shape[0] + * + * def __getattr__(self, attr): # <<<<<<<<<<<<<< + * return getattr(self.memview, attr) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getattr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":234 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + +/* Python wrapper */ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item); /*proto*/ +static PyObject *__pyx_array___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_array___pyx_pf_15View_dot_MemoryView_5array_10__getitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":235 + * + * def __getitem__(self, item): + * return self.memview[item] # <<<<<<<<<<<<<< + * + * def __setitem__(self, item, value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetItem(__pyx_t_1, __pyx_v_item); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 235, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":234 + * return getattr(self.memview, attr) + * + * def __getitem__(self, item): # <<<<<<<<<<<<<< + * return self.memview[item] + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.array.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":237 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + +/* Python wrapper */ +static int __pyx_array___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_array___setitem__(PyObject 
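+ /* __getattr__, __getitem__ and __setitem__ all delegate to self.memview,
+  * so the array behaves like its own memoryview (sketch):
+  *
+  *     arr[0, 0] = 1.0    # same as arr.memview[0, 0] = 1.0
+  *     arr.shape          # attribute lookup forwarded to the memoryview
+  */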
*__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(((struct __pyx_array_obj *)__pyx_v_self), ((PyObject *)__pyx_v_item), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_array___pyx_pf_15View_dot_MemoryView_5array_12__setitem__(struct __pyx_array_obj *__pyx_v_self, PyObject *__pyx_v_item, PyObject *__pyx_v_value) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + + /* "View.MemoryView":238 + * + * def __setitem__(self, item, value): + * self.memview[item] = value # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_memview); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 238, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (unlikely((PyObject_SetItem(__pyx_t_1, __pyx_v_item, __pyx_v_value) < 0))) __PYX_ERR(1, 238, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":237 + * return self.memview[item] + * + * def __setitem__(self, item, value): # <<<<<<<<<<<<<< + * self.memview[item] = value + * + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.array.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_pw___pyx_array_1__reduce_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 1, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + if (unlikely(__pyx_nargs > 0)) { + __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;} + if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL; + goto __pyx_L4_argument_unpacking_done; + goto __pyx_L3_error; + 
__pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf___pyx_array___reduce_cython__(((struct __pyx_array_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array___reduce_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.array.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_pw___pyx_array_3__setstate_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 3, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 
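+ /* Both __reduce_cython__ and __setstate_cython__ raise TypeError because
+  * array's __cinit__ is non-trivial, i.e. instances are deliberately not
+  * picklable (sketch):
+  *
+  *     import pickle
+  *     pickle.dumps(arr)   # TypeError: no default __reduce__ ...
+  */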
0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v___pyx_state = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf___pyx_array_2__setstate_cython__(((struct __pyx_array_obj *)__pyx_v_self), __pyx_v___pyx_state); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_array_2__setstate_cython__(CYTHON_UNUSED struct __pyx_array_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< + */ + __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.array.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":248 + * + * @cname("__pyx_array_allocate_buffer") + * cdef int _allocate_buffer(array self) except -1: # <<<<<<<<<<<<<< + * + * + */ + +static int __pyx_array_allocate_buffer(struct __pyx_array_obj *__pyx_v_self) { + Py_ssize_t __pyx_v_i; + PyObject **__pyx_v_p; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_allocate_buffer", 0); + + /* "View.MemoryView":254 + * cdef PyObject **p + * + * self.free_data = True # <<<<<<<<<<<<<< + * self.data = 
malloc(self.len) + * if not self.data: + */ + __pyx_v_self->free_data = 1; + + /* "View.MemoryView":255 + * + * self.free_data = True + * self.data = malloc(self.len) # <<<<<<<<<<<<<< + * if not self.data: + * raise MemoryError, "unable to allocate array data." + */ + __pyx_v_self->data = ((char *)malloc(__pyx_v_self->len)); + + /* "View.MemoryView":256 + * self.free_data = True + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError, "unable to allocate array data." + * + */ + __pyx_t_1 = (!(__pyx_v_self->data != 0)); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":257 + * self.data = malloc(self.len) + * if not self.data: + * raise MemoryError, "unable to allocate array data." # <<<<<<<<<<<<<< + * + * if self.dtype_is_object: + */ + __Pyx_Raise(__pyx_builtin_MemoryError, __pyx_kp_s_unable_to_allocate_array_data, 0, 0); + __PYX_ERR(1, 257, __pyx_L1_error) + + /* "View.MemoryView":256 + * self.free_data = True + * self.data = malloc(self.len) + * if not self.data: # <<<<<<<<<<<<<< + * raise MemoryError, "unable to allocate array data." + * + */ + } + + /* "View.MemoryView":259 + * raise MemoryError, "unable to allocate array data." + * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len // self.itemsize): + */ + if (__pyx_v_self->dtype_is_object) { + + /* "View.MemoryView":260 + * + * if self.dtype_is_object: + * p = self.data # <<<<<<<<<<<<<< + * for i in range(self.len // self.itemsize): + * p[i] = Py_None + */ + __pyx_v_p = ((PyObject **)__pyx_v_self->data); + + /* "View.MemoryView":261 + * if self.dtype_is_object: + * p = self.data + * for i in range(self.len // self.itemsize): # <<<<<<<<<<<<<< + * p[i] = Py_None + * Py_INCREF(Py_None) + */ + if (unlikely(__pyx_v_self->itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 261, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_self->itemsize == (Py_ssize_t)-1) && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_self->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 261, __pyx_L1_error) + } + __pyx_t_2 = __Pyx_div_Py_ssize_t(__pyx_v_self->len, __pyx_v_self->itemsize); + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":262 + * p = self.data + * for i in range(self.len // self.itemsize): + * p[i] = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * return 0 + */ + (__pyx_v_p[__pyx_v_i]) = Py_None; + + /* "View.MemoryView":263 + * for i in range(self.len // self.itemsize): + * p[i] = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * return 0 + * + */ + Py_INCREF(Py_None); + } + + /* "View.MemoryView":259 + * raise MemoryError, "unable to allocate array data." 
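+ *
+ * _allocate_buffer malloc's self.len bytes and raises MemoryError on
+ * failure. For object arrays, every slot was just pre-filled with Py_None
+ * (one Py_INCREF per slot) so the blanket DECREF pass in __dealloc__ is
+ * safe even before any element is assigned.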
+ * + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * p = self.data + * for i in range(self.len // self.itemsize): + */ + } + + /* "View.MemoryView":264 + * p[i] = Py_None + * Py_INCREF(Py_None) + * return 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":248 + * + * @cname("__pyx_array_allocate_buffer") + * cdef int _allocate_buffer(array self) except -1: # <<<<<<<<<<<<<< + * + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView._allocate_buffer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":268 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): # <<<<<<<<<<<<<< + * cdef array result + * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. + */ + +static struct __pyx_array_obj *__pyx_array_new(PyObject *__pyx_v_shape, Py_ssize_t __pyx_v_itemsize, char *__pyx_v_format, char *__pyx_v_c_mode, char *__pyx_v_buf) { + struct __pyx_array_obj *__pyx_v_result = 0; + PyObject *__pyx_v_mode = 0; + struct __pyx_array_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("array_cwrapper", 0); + + /* "View.MemoryView":270 + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): + * cdef array result + * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. # <<<<<<<<<<<<<< + * + * if buf is NULL: + */ + __pyx_t_2 = ((__pyx_v_c_mode[0]) == 'f'); + if (__pyx_t_2) { + __Pyx_INCREF(__pyx_n_s_fortran); + __pyx_t_1 = __pyx_n_s_fortran; + } else { + __Pyx_INCREF(__pyx_n_s_c); + __pyx_t_1 = __pyx_n_s_c; + } + __pyx_v_mode = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":272 + * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. 
+ * + * if buf is NULL: # <<<<<<<<<<<<<< + * result = array.__new__(array, shape, itemsize, format, mode) + * else: + */ + __pyx_t_2 = (__pyx_v_buf == NULL); + if (__pyx_t_2) { + + /* "View.MemoryView":273 + * + * if buf is NULL: + * result = array.__new__(array, shape, itemsize, format, mode) # <<<<<<<<<<<<<< + * else: + * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) + */ + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 273, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 273, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_v_shape)) __PYX_ERR(1, 273, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1)) __PYX_ERR(1, 273, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_mode); + __Pyx_GIVEREF(__pyx_v_mode); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 3, __pyx_v_mode)) __PYX_ERR(1, 273, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_3 = 0; + __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_array_type), __pyx_t_4, NULL)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 273, __pyx_L1_error) + __Pyx_GOTREF((PyObject *)__pyx_t_3); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":272 + * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. 
+ * + * if buf is NULL: # <<<<<<<<<<<<<< + * result = array.__new__(array, shape, itemsize, format, mode) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":275 + * result = array.__new__(array, shape, itemsize, format, mode) + * else: + * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) # <<<<<<<<<<<<<< + * result.data = buf + * + */ + /*else*/ { + __pyx_t_3 = PyInt_FromSsize_t(__pyx_v_itemsize); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_PyBytes_FromString(__pyx_v_format); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = PyTuple_New(4); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 275, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_shape); + __Pyx_GIVEREF(__pyx_v_shape); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_shape)) __PYX_ERR(1, 275, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_mode); + __Pyx_GIVEREF(__pyx_v_mode); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_v_mode)) __PYX_ERR(1, 275, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 275, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_allocate_buffer, Py_False) < 0) __PYX_ERR(1, 275, __pyx_L1_error) + __pyx_t_3 = ((PyObject *)__pyx_tp_new_array(((PyTypeObject *)__pyx_array_type), __pyx_t_1, __pyx_t_4)); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 275, __pyx_L1_error) + __Pyx_GOTREF((PyObject *)__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_result = ((struct __pyx_array_obj *)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":276 + * else: + * result = array.__new__(array, shape, itemsize, format, mode, allocate_buffer=False) + * result.data = buf # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->data = __pyx_v_buf; + } + __pyx_L3:; + + /* "View.MemoryView":278 + * result.data = buf + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF((PyObject *)__pyx_r); + __Pyx_INCREF((PyObject *)__pyx_v_result); + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":268 + * + * @cname("__pyx_array_new") + * cdef array array_cwrapper(tuple shape, Py_ssize_t itemsize, char *format, char *c_mode, char *buf): # <<<<<<<<<<<<<< + * cdef array result + * cdef str mode = "fortran" if c_mode[0] == b'f' else "c" # this often comes from a constant C string. 
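+ *
+ * array_cwrapper (exposed as __pyx_array_new) is the C-level factory used
+ * elsewhere in the generated module: with buf == NULL it lets __cinit__
+ * allocate storage; otherwise it passes allocate_buffer=False and points
+ * result.data at the caller's existing buffer.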
+ */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.array_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XDECREF(__pyx_v_mode); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":304 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + +/* Python wrapper */ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_MemviewEnum___init__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_name = 0; + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__ (wrapper)", 0); + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 304, __pyx_L3_error) + #endif + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_name,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_name)) != 0)) { + (void)__Pyx_Arg_NewRef_VARARGS(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 304, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__init__") < 0)) __PYX_ERR(1, 304, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); + } + __pyx_v_name = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__init__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 304, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_VARARGS(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("View.MemoryView.Enum.__init__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v_name); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_VARARGS(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int 
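+ /* Enum is a tiny named-sentinel helper: __init__ stores `name` and
+  * __repr__ returns it. The module uses instances (e.g. the `generic`
+  * constant quoted above) as unique markers for memoryview access modes.
+  */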
__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum___init__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v_name) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__init__", 0); + + /* "View.MemoryView":305 + * cdef object name + * def __init__(self, name): + * self.name = name # <<<<<<<<<<<<<< + * def __repr__(self): + * return self.name + */ + __Pyx_INCREF(__pyx_v_name); + __Pyx_GIVEREF(__pyx_v_name); + __Pyx_GOTREF(__pyx_v_self->name); + __Pyx_DECREF(__pyx_v_self->name); + __pyx_v_self->name = __pyx_v_name; + + /* "View.MemoryView":304 + * cdef class Enum(object): + * cdef object name + * def __init__(self, name): # <<<<<<<<<<<<<< + * self.name = name + * def __repr__(self): + */ + + /* function exit code */ + __pyx_r = 0; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":306 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + +/* Python wrapper */ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_MemviewEnum___repr__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_MemviewEnum___pyx_pf_15View_dot_MemoryView_4Enum_2__repr__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "View.MemoryView":307 + * self.name = name + * def __repr__(self): + * return self.name # <<<<<<<<<<<<<< + * + * cdef generic = Enum("") + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->name); + __pyx_r = __pyx_v_self->name; + goto __pyx_L0; + + /* "View.MemoryView":306 + * def __init__(self, name): + * self.name = name + * def __repr__(self): # <<<<<<<<<<<<<< + * return self.name + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_1__reduce_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) 
__PYX_ERR(1, 1, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + if (unlikely(__pyx_nargs > 0)) { + __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;} + if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL; + goto __pyx_L4_argument_unpacking_done; + goto __pyx_L3_error; + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf___pyx_MemviewEnum___reduce_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum___reduce_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self) { + PyObject *__pyx_v_state = 0; + PyObject *__pyx_v__dict = 0; + int __pyx_v_use_setstate; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":5 + * cdef object _dict + * cdef bint use_setstate + * state = (self.name,) # <<<<<<<<<<<<<< + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v_self->name); + __Pyx_GIVEREF(__pyx_v_self->name); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v_self->name)) __PYX_ERR(1, 5, __pyx_L1_error); + __pyx_v_state = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "(tree fragment)":6 + * cdef bint use_setstate + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) # <<<<<<<<<<<<<< + * if _dict is not None: + * state += (_dict,) + */ + __pyx_t_1 = __Pyx_GetAttr3(((PyObject *)__pyx_v_self), __pyx_n_s_dict, Py_None); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v__dict = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + __pyx_t_2 = (__pyx_v__dict != Py_None); + if (__pyx_t_2) { + + /* "(tree fragment)":8 + * _dict = getattr(self, '__dict__', None) + * if _dict is not None: + * state += (_dict,) # <<<<<<<<<<<<<< + * use_setstate = True + * else: + */ + __pyx_t_1 = PyTuple_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_v__dict); + __Pyx_GIVEREF(__pyx_v__dict); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_v__dict)) __PYX_ERR(1, 8, __pyx_L1_error); + __pyx_t_3 = PyNumber_InPlaceAdd(__pyx_v_state, __pyx_t_1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 8, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF_SET(__pyx_v_state, ((PyObject*)__pyx_t_3)); + __pyx_t_3 = 0; + + /* "(tree fragment)":9 + * if _dict is not None: + * state += (_dict,) + * use_setstate = True # <<<<<<<<<<<<<< + * else: + * use_setstate = self.name is not None + */ + __pyx_v_use_setstate = 1; + + /* "(tree fragment)":7 + * state = (self.name,) + * _dict = getattr(self, 
'__dict__', None) + * if _dict is not None: # <<<<<<<<<<<<<< + * state += (_dict,) + * use_setstate = True + */ + goto __pyx_L3; + } + + /* "(tree fragment)":11 + * use_setstate = True + * else: + * use_setstate = self.name is not None # <<<<<<<<<<<<<< + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_self->name != Py_None); + __pyx_v_use_setstate = __pyx_t_2; + } + __pyx_L3:; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state + * else: + */ + if (__pyx_v_use_setstate) { + + /* "(tree fragment)":13 + * use_setstate = self.name is not None + * if use_setstate: + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state # <<<<<<<<<<<<<< + * else: + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))))) __PYX_ERR(1, 13, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_136983863); + __Pyx_GIVEREF(__pyx_int_136983863); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_136983863)) __PYX_ERR(1, 13, __pyx_L1_error); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, Py_None)) __PYX_ERR(1, 13, __pyx_L1_error); + __pyx_t_4 = PyTuple_New(3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 13, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3)) __PYX_ERR(1, 13, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_t_1)) __PYX_ERR(1, 13, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 2, __pyx_v_state)) __PYX_ERR(1, 13, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "(tree fragment)":12 + * else: + * use_setstate = self.name is not None + * if use_setstate: # <<<<<<<<<<<<<< + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state + * else: + */ + } + + /* "(tree fragment)":15 + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, None), state + * else: + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_GetModuleGlobalName(__pyx_t_4, __pyx_n_s_pyx_unpickle_Enum); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = PyTuple_New(3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + __Pyx_GIVEREF(((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self)))); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 0, ((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))))) __PYX_ERR(1, 15, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_136983863); + 
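+ /* 0x82a3537 (== __pyx_int_136983863) is the checksum Cython derives from the
+  * Enum type's attribute layout; __pyx_unpickle_Enum compares it when
+  * unpickling so state from an incompatible build is rejected. */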
__Pyx_GIVEREF(__pyx_int_136983863); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_int_136983863)) __PYX_ERR(1, 15, __pyx_L1_error); + __Pyx_INCREF(__pyx_v_state); + __Pyx_GIVEREF(__pyx_v_state); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_v_state)) __PYX_ERR(1, 15, __pyx_L1_error); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_4)) __PYX_ERR(1, 15, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1)) __PYX_ERR(1, 15, __pyx_L1_error); + __pyx_t_4 = 0; + __pyx_t_1 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + } + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * cdef tuple state + * cdef object _dict + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.Enum.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_state); + __Pyx_XDECREF(__pyx_v__dict); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_pw___pyx_MemviewEnum_3__setstate_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v___pyx_state = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 16, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 16, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, 
__pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 16, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v___pyx_state = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 16, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf___pyx_MemviewEnum_2__setstate_cython__(((struct __pyx_MemviewEnum_obj *)__pyx_v_self), __pyx_v___pyx_state); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_MemviewEnum_2__setstate_cython__(struct __pyx_MemviewEnum_obj *__pyx_v_self, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":17 + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) + * def __setstate_cython__(self, __pyx_state): + * __pyx_unpickle_Enum__set_state(self, __pyx_state) # <<<<<<<<<<<<<< + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_v___pyx_state))) __PYX_ERR(1, 17, __pyx_L1_error) + __pyx_t_1 = __pyx_unpickle_Enum__set_state(__pyx_v_self, ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":16 + * else: + * return __pyx_unpickle_Enum, (type(self), 0x82a3537, state) + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state(self, __pyx_state) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.Enum.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":349 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + +/* Python wrapper */ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds); /*proto*/ +static int __pyx_memoryview___cinit__(PyObject *__pyx_v_self, PyObject *__pyx_args, PyObject *__pyx_kwds) { + PyObject *__pyx_v_obj = 0; + int __pyx_v_flags; + int __pyx_v_dtype_is_object; + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[3] = {0,0,0}; + int 
__pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__cinit__ (wrapper)", 0); + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 349, __pyx_L3_error) + #endif + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_obj,&__pyx_n_s_flags,&__pyx_n_s_dtype_is_object,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_VARARGS(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_obj)) != 0)) { + (void)__Pyx_Arg_NewRef_VARARGS(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_flags)) != 0)) { + (void)__Pyx_Arg_NewRef_VARARGS(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, 1); __PYX_ERR(1, 349, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (kw_args > 0) { + PyObject* value = __Pyx_GetKwValue_VARARGS(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_dtype_is_object); + if (value) { values[2] = __Pyx_Arg_NewRef_VARARGS(value); kw_args--; } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__cinit__") < 0)) __PYX_ERR(1, 349, __pyx_L3_error) + } + } else { + switch (__pyx_nargs) { + case 3: values[2] = __Pyx_Arg_VARARGS(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = __Pyx_Arg_VARARGS(__pyx_args, 1); + values[0] = __Pyx_Arg_VARARGS(__pyx_args, 0); + break; + default: goto __pyx_L5_argtuple_error; + } + } + __pyx_v_obj = values[0]; + __pyx_v_flags = __Pyx_PyInt_As_int(values[1]); if (unlikely((__pyx_v_flags == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) + if (values[2]) { + __pyx_v_dtype_is_object = __Pyx_PyObject_IsTrue(values[2]); if (unlikely((__pyx_v_dtype_is_object == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 349, __pyx_L3_error) + } else { + __pyx_v_dtype_is_object = ((int)0); + } + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__cinit__", 0, 2, 3, __pyx_nargs); __PYX_ERR(1, 349, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_VARARGS(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return -1; + __pyx_L4_argument_unpacking_done:; + __pyx_r = 
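+ /* All positional/keyword unpacking succeeded; delegate to the typed
+  * implementation with the parsed obj, flags and dtype_is_object values. */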
__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_obj, __pyx_v_flags, __pyx_v_dtype_is_object); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_VARARGS(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview___cinit__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj, int __pyx_v_flags, int __pyx_v_dtype_is_object) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + Py_intptr_t __pyx_t_4; + size_t __pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__cinit__", 0); + + /* "View.MemoryView":350 + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj # <<<<<<<<<<<<<< + * self.flags = flags + * if type(self) is memoryview or obj is not None: + */ + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + __Pyx_GOTREF(__pyx_v_self->obj); + __Pyx_DECREF(__pyx_v_self->obj); + __pyx_v_self->obj = __pyx_v_obj; + + /* "View.MemoryView":351 + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): + * self.obj = obj + * self.flags = flags # <<<<<<<<<<<<<< + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + */ + __pyx_v_self->flags = __pyx_v_flags; + + /* "View.MemoryView":352 + * self.obj = obj + * self.flags = flags + * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + __pyx_t_2 = (((PyObject *)Py_TYPE(((PyObject *)__pyx_v_self))) == ((PyObject *)__pyx_memoryview_type)); + if (!__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_2 = (__pyx_v_obj != Py_None); + __pyx_t_1 = __pyx_t_2; + __pyx_L4_bool_binop_done:; + if (__pyx_t_1) { + + /* "View.MemoryView":353 + * self.flags = flags + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) # <<<<<<<<<<<<<< + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + */ + __pyx_t_3 = __Pyx_GetBuffer(__pyx_v_obj, (&__pyx_v_self->view), __pyx_v_flags); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 353, __pyx_L1_error) + + /* "View.MemoryView":354 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_t_1 = (((PyObject *)__pyx_v_self->view.obj) == NULL); + if (__pyx_t_1) { + + /* "View.MemoryView":355 + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = Py_None; + + /* "View.MemoryView":356 + * if self.view.obj == NULL: + * (<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * if not __PYX_CYTHON_ATOMICS_ENABLED(): + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":354 + * if type(self) is memoryview or obj is not None: + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: # <<<<<<<<<<<<<< + * 
(<__pyx_buffer *> &self.view).obj = Py_None + * Py_INCREF(Py_None) + */ + } + + /* "View.MemoryView":352 + * self.obj = obj + * self.flags = flags + * if type(self) is memoryview or obj is not None: # <<<<<<<<<<<<<< + * __Pyx_GetBuffer(obj, &self.view, flags) + * if self.view.obj == NULL: + */ + } + + /* "View.MemoryView":358 + * Py_INCREF(Py_None) + * + * if not __PYX_CYTHON_ATOMICS_ENABLED(): # <<<<<<<<<<<<<< + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < 8: + */ + __pyx_t_1 = (!__PYX_CYTHON_ATOMICS_ENABLED()); + if (__pyx_t_1) { + + /* "View.MemoryView":360 + * if not __PYX_CYTHON_ATOMICS_ENABLED(): + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < 8: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + __pyx_t_1 = (__pyx_memoryview_thread_locks_used < 8); + if (__pyx_t_1) { + + /* "View.MemoryView":361 + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < 8: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + */ + __pyx_v_self->lock = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + + /* "View.MemoryView":362 + * if __pyx_memoryview_thread_locks_used < 8: + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 # <<<<<<<<<<<<<< + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used + 1); + + /* "View.MemoryView":360 + * if not __PYX_CYTHON_ATOMICS_ENABLED(): + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < 8: # <<<<<<<<<<<<<< + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + */ + } + + /* "View.MemoryView":363 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + __pyx_t_1 = (__pyx_v_self->lock == NULL); + if (__pyx_t_1) { + + /* "View.MemoryView":364 + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() # <<<<<<<<<<<<<< + * if self.lock is NULL: + * raise MemoryError + */ + __pyx_v_self->lock = PyThread_allocate_lock(); + + /* "View.MemoryView":365 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + __pyx_t_1 = (__pyx_v_self->lock == NULL); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":366 + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + PyErr_NoMemory(); __PYX_ERR(1, 366, __pyx_L1_error) + + /* "View.MemoryView":365 + * if self.lock is NULL: + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + } + + /* "View.MemoryView":363 + * self.lock = __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] + * __pyx_memoryview_thread_locks_used += 1 + * if self.lock is NULL: # <<<<<<<<<<<<<< + * self.lock = PyThread_allocate_lock() + * if self.lock is NULL: + */ + 
} + + /* "View.MemoryView":358 + * Py_INCREF(Py_None) + * + * if not __PYX_CYTHON_ATOMICS_ENABLED(): # <<<<<<<<<<<<<< + * global __pyx_memoryview_thread_locks_used + * if __pyx_memoryview_thread_locks_used < 8: + */ + } + + /* "View.MemoryView":368 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":369 + * + * if flags & PyBUF_FORMAT: + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') # <<<<<<<<<<<<<< + * else: + * self.dtype_is_object = dtype_is_object + */ + __pyx_t_2 = ((__pyx_v_self->view.format[0]) == 'O'); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L12_bool_binop_done; + } + __pyx_t_2 = ((__pyx_v_self->view.format[1]) == '\x00'); + __pyx_t_1 = __pyx_t_2; + __pyx_L12_bool_binop_done:; + __pyx_v_self->dtype_is_object = __pyx_t_1; + + /* "View.MemoryView":368 + * raise MemoryError + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + */ + goto __pyx_L11; + } + + /* "View.MemoryView":371 + * self.dtype_is_object = (self.view.format[0] == b'O' and self.view.format[1] == b'\0') + * else: + * self.dtype_is_object = dtype_is_object # <<<<<<<<<<<<<< + * + * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 + */ + /*else*/ { + __pyx_v_self->dtype_is_object = __pyx_v_dtype_is_object; + } + __pyx_L11:; + + /* "View.MemoryView":373 + * self.dtype_is_object = dtype_is_object + * + * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 # <<<<<<<<<<<<<< + * self.typeinfo = NULL + * + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(__pyx_assertions_enabled())) { + __pyx_t_4 = ((Py_intptr_t)((void *)(&__pyx_v_self->acquisition_count))); + __pyx_t_5 = (sizeof(__pyx_atomic_int_type)); + if (unlikely(__pyx_t_5 == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 373, __pyx_L1_error) + } + __pyx_t_1 = ((__pyx_t_4 % __pyx_t_5) == 0); + if (unlikely(!__pyx_t_1)) { + __Pyx_Raise(__pyx_builtin_AssertionError, 0, 0, 0); + __PYX_ERR(1, 373, __pyx_L1_error) + } + } + #else + if ((1)); else __PYX_ERR(1, 373, __pyx_L1_error) + #endif + + /* "View.MemoryView":374 + * + * assert (&self.acquisition_count) % sizeof(__pyx_atomic_int_type) == 0 + * self.typeinfo = NULL # <<<<<<<<<<<<<< + * + * def __dealloc__(memoryview self): + */ + __pyx_v_self->typeinfo = NULL; + + /* "View.MemoryView":349 + * cdef __Pyx_TypeInfo *typeinfo + * + * def __cinit__(memoryview self, object obj, int flags, bint dtype_is_object=False): # <<<<<<<<<<<<<< + * self.obj = obj + * self.flags = flags + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__cinit__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":376 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + +/* Python wrapper */ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryview___dealloc__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + 
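+ /* __dealloc__ below undoes __cinit__: it releases the Py_buffer when
+  * self.obj is set (or drops the Py_None ownership marker), then recycles
+  * self.lock back into the static pool if it came from there and frees it
+  * with PyThread_free_lock() otherwise. */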
__Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_2__dealloc__(struct __pyx_memoryview_obj *__pyx_v_self) { + int __pyx_v_i; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + PyThread_type_lock __pyx_t_5; + PyThread_type_lock __pyx_t_6; + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":377 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + __pyx_t_1 = (__pyx_v_self->obj != Py_None); + if (__pyx_t_1) { + + /* "View.MemoryView":378 + * def __dealloc__(memoryview self): + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) # <<<<<<<<<<<<<< + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + */ + __Pyx_ReleaseBuffer((&__pyx_v_self->view)); + + /* "View.MemoryView":377 + * + * def __dealloc__(memoryview self): + * if self.obj is not None: # <<<<<<<<<<<<<< + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":379 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + __pyx_t_1 = (((Py_buffer *)(&__pyx_v_self->view))->obj == Py_None); + if (__pyx_t_1) { + + /* "View.MemoryView":381 + * elif (<__pyx_buffer *> &self.view).obj == Py_None: + * + * (<__pyx_buffer *> &self.view).obj = NULL # <<<<<<<<<<<<<< + * Py_DECREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_self->view))->obj = NULL; + + /* "View.MemoryView":382 + * + * (<__pyx_buffer *> &self.view).obj = NULL + * Py_DECREF(Py_None) # <<<<<<<<<<<<<< + * + * cdef int i + */ + Py_DECREF(Py_None); + + /* "View.MemoryView":379 + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + * elif (<__pyx_buffer *> &self.view).obj == Py_None: # <<<<<<<<<<<<<< + * + * (<__pyx_buffer *> &self.view).obj = NULL + */ + } + __pyx_L3:; + + /* "View.MemoryView":386 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + __pyx_t_1 = (__pyx_v_self->lock != NULL); + if (__pyx_t_1) { + + /* "View.MemoryView":387 + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): # <<<<<<<<<<<<<< + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + */ + __pyx_t_2 = __pyx_memoryview_thread_locks_used; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":388 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + __pyx_t_1 = ((__pyx_memoryview_thread_locks[__pyx_v_i]) == __pyx_v_self->lock); + if (__pyx_t_1) { + + /* 
"View.MemoryView":389 + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 # <<<<<<<<<<<<<< + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + */ + __pyx_memoryview_thread_locks_used = (__pyx_memoryview_thread_locks_used - 1); + + /* "View.MemoryView":390 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + __pyx_t_1 = (__pyx_v_i != __pyx_memoryview_thread_locks_used); + if (__pyx_t_1) { + + /* "View.MemoryView":392 + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) # <<<<<<<<<<<<<< + * break + * else: + */ + __pyx_t_5 = (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]); + __pyx_t_6 = (__pyx_memoryview_thread_locks[__pyx_v_i]); + + /* "View.MemoryView":391 + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break + */ + (__pyx_memoryview_thread_locks[__pyx_v_i]) = __pyx_t_5; + (__pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used]) = __pyx_t_6; + + /* "View.MemoryView":390 + * if __pyx_memoryview_thread_locks[i] is self.lock: + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + */ + } + + /* "View.MemoryView":393 + * __pyx_memoryview_thread_locks[i], __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used] = ( + * __pyx_memoryview_thread_locks[__pyx_memoryview_thread_locks_used], __pyx_memoryview_thread_locks[i]) + * break # <<<<<<<<<<<<<< + * else: + * PyThread_free_lock(self.lock) + */ + goto __pyx_L6_break; + + /* "View.MemoryView":388 + * if self.lock != NULL: + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: # <<<<<<<<<<<<<< + * __pyx_memoryview_thread_locks_used -= 1 + * if i != __pyx_memoryview_thread_locks_used: + */ + } + } + /*else*/ { + + /* "View.MemoryView":395 + * break + * else: + * PyThread_free_lock(self.lock) # <<<<<<<<<<<<<< + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + */ + PyThread_free_lock(__pyx_v_self->lock); + } + __pyx_L6_break:; + + /* "View.MemoryView":386 + * cdef int i + * global __pyx_memoryview_thread_locks_used + * if self.lock != NULL: # <<<<<<<<<<<<<< + * for i in range(__pyx_memoryview_thread_locks_used): + * if __pyx_memoryview_thread_locks[i] is self.lock: + */ + } + + /* "View.MemoryView":376 + * self.typeinfo = NULL + * + * def __dealloc__(memoryview 
self): # <<<<<<<<<<<<<< + * if self.obj is not None: + * __Pyx_ReleaseBuffer(&self.view) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":397 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf + */ + +static char *__pyx_memoryview_get_item_pointer(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + Py_ssize_t __pyx_v_dim; + char *__pyx_v_itemp; + PyObject *__pyx_v_idx = NULL; + char *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t __pyx_t_3; + PyObject *(*__pyx_t_4)(PyObject *); + PyObject *__pyx_t_5 = NULL; + Py_ssize_t __pyx_t_6; + char *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_item_pointer", 0); + + /* "View.MemoryView":399 + * cdef char *get_item_pointer(memoryview self, object index) except NULL: + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf # <<<<<<<<<<<<<< + * + * for dim, idx in enumerate(index): + */ + __pyx_v_itemp = ((char *)__pyx_v_self->view.buf); + + /* "View.MemoryView":401 + * cdef char *itemp = self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + __pyx_t_1 = 0; + if (likely(PyList_CheckExact(__pyx_v_index)) || PyTuple_CheckExact(__pyx_v_index)) { + __pyx_t_2 = __pyx_v_index; __Pyx_INCREF(__pyx_t_2); __pyx_t_3 = 0; + __pyx_t_4 = NULL; + } else { + __pyx_t_3 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_index); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 401, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 401, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_4)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + if (__pyx_t_3 >= PyList_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely((0 < 0))) __PYX_ERR(1, 401, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } else { + if (__pyx_t_3 >= PyTuple_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_5 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_3); __Pyx_INCREF(__pyx_t_5); __pyx_t_3++; if (unlikely((0 < 0))) __PYX_ERR(1, 401, __pyx_L1_error) + #else + __pyx_t_5 = PySequence_ITEM(__pyx_t_2, __pyx_t_3); __pyx_t_3++; if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 401, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + #endif + } + } else { + __pyx_t_5 = __pyx_t_4(__pyx_t_2); + if (unlikely(!__pyx_t_5)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 401, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_5); + } + __Pyx_XDECREF_SET(__pyx_v_idx, __pyx_t_5); + __pyx_t_5 = 0; + __pyx_v_dim = __pyx_t_1; + __pyx_t_1 = (__pyx_t_1 + 1); + + /* "View.MemoryView":402 + * + * for dim, idx in enumerate(index): + * itemp = pybuffer_index(&self.view, itemp, idx, dim) # <<<<<<<<<<<<<< + * + * return itemp + */ + __pyx_t_6 = __Pyx_PyIndex_AsSsize_t(__pyx_v_idx); if 
(unlikely((__pyx_t_6 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 402, __pyx_L1_error) + __pyx_t_7 = __pyx_pybuffer_index((&__pyx_v_self->view), __pyx_v_itemp, __pyx_t_6, __pyx_v_dim); if (unlikely(__pyx_t_7 == ((char *)NULL))) __PYX_ERR(1, 402, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_7; + + /* "View.MemoryView":401 + * cdef char *itemp = self.view.buf + * + * for dim, idx in enumerate(index): # <<<<<<<<<<<<<< + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + */ + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":404 + * itemp = pybuffer_index(&self.view, itemp, idx, dim) + * + * return itemp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_itemp; + goto __pyx_L0; + + /* "View.MemoryView":397 + * PyThread_free_lock(self.lock) + * + * cdef char *get_item_pointer(memoryview self, object index) except NULL: # <<<<<<<<<<<<<< + * cdef Py_ssize_t dim + * cdef char *itemp = self.view.buf + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.get_item_pointer", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_idx); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":407 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index); /*proto*/ +static PyObject *__pyx_memoryview___getitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getitem__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_4__getitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_indices = NULL; + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + char *__pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__getitem__", 0); + + /* "View.MemoryView":408 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + __pyx_t_1 = (__pyx_v_index == __pyx_builtin_Ellipsis); + if (__pyx_t_1) { + + /* "View.MemoryView":409 + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: + * return self # <<<<<<<<<<<<<< + * + * have_slices, indices = _unellipsify(index, self.view.ndim) + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF((PyObject *)__pyx_v_self); + __pyx_r = ((PyObject *)__pyx_v_self); + goto __pyx_L0; + + /* "View.MemoryView":408 + * + * def __getitem__(memoryview self, object index): + * if index is Ellipsis: # <<<<<<<<<<<<<< + * return self + * + */ + } + + /* "View.MemoryView":411 + * return self + * + * have_slices, indices = _unellipsify(index, self.view.ndim) # 
<<<<<<<<<<<<<< + * + * cdef char *itemp + */ + __pyx_t_2 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 411, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (likely(__pyx_t_2 != Py_None)) { + PyObject* sequence = __pyx_t_2; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 411, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_4 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + #else + __pyx_t_3 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 411, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 411, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + #endif + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 411, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_3; + __pyx_t_3 = 0; + __pyx_v_indices = __pyx_t_4; + __pyx_t_4 = 0; + + /* "View.MemoryView":414 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 414, __pyx_L1_error) + if (__pyx_t_1) { + + /* "View.MemoryView":415 + * cdef char *itemp + * if have_slices: + * return memview_slice(self, indices) # <<<<<<<<<<<<<< + * else: + * itemp = self.get_item_pointer(indices) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = ((PyObject *)__pyx_memview_slice(__pyx_v_self, __pyx_v_indices)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 415, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":414 + * + * cdef char *itemp + * if have_slices: # <<<<<<<<<<<<<< + * return memview_slice(self, indices) + * else: + */ + } + + /* "View.MemoryView":417 + * return memview_slice(self, indices) + * else: + * itemp = self.get_item_pointer(indices) # <<<<<<<<<<<<<< + * return self.convert_item_to_object(itemp) + * + */ + /*else*/ { + __pyx_t_5 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_indices); if (unlikely(__pyx_t_5 == ((char *)NULL))) __PYX_ERR(1, 417, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_5; + + /* "View.MemoryView":418 + * else: + * itemp = self.get_item_pointer(indices) + * return self.convert_item_to_object(itemp) # <<<<<<<<<<<<<< + * + * def __setitem__(memoryview self, object index, object value): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->convert_item_to_object(__pyx_v_self, __pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 418, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":407 + * + * + * def __getitem__(memoryview self, object index): # <<<<<<<<<<<<<< + * if index is Ellipsis: + * return self + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.memoryview.__getitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_have_slices); + 
__Pyx_XDECREF(__pyx_v_indices); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":420 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError, "Cannot assign to read-only memoryview" + */ + +/* Python wrapper */ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value); /*proto*/ +static int __pyx_memoryview___setitem__(PyObject *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setitem__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((PyObject *)__pyx_v_index), ((PyObject *)__pyx_v_value)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_6__setitem__(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + PyObject *__pyx_v_have_slices = NULL; + PyObject *__pyx_v_obj = NULL; + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setitem__", 0); + __Pyx_INCREF(__pyx_v_index); + + /* "View.MemoryView":421 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError, "Cannot assign to read-only memoryview" + * + */ + if (unlikely(__pyx_v_self->view.readonly)) { + + /* "View.MemoryView":422 + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: + * raise TypeError, "Cannot assign to read-only memoryview" # <<<<<<<<<<<<<< + * + * have_slices, index = _unellipsify(index, self.view.ndim) + */ + __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_Cannot_assign_to_read_only_memor, 0, 0); + __PYX_ERR(1, 422, __pyx_L1_error) + + /* "View.MemoryView":421 + * + * def __setitem__(memoryview self, object index, object value): + * if self.view.readonly: # <<<<<<<<<<<<<< + * raise TypeError, "Cannot assign to read-only memoryview" + * + */ + } + + /* "View.MemoryView":424 + * raise TypeError, "Cannot assign to read-only memoryview" + * + * have_slices, index = _unellipsify(index, self.view.ndim) # <<<<<<<<<<<<<< + * + * if have_slices: + */ + __pyx_t_1 = _unellipsify(__pyx_v_index, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 424, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (likely(__pyx_t_1 != Py_None)) { + PyObject* sequence = __pyx_t_1; + Py_ssize_t size = __Pyx_PySequence_SIZE(sequence); + if (unlikely(size != 2)) { + if (size > 2) __Pyx_RaiseTooManyValuesError(2); + else if (size >= 0) __Pyx_RaiseNeedMoreValuesError(size); + __PYX_ERR(1, 424, __pyx_L1_error) + } + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_2 = PyTuple_GET_ITEM(sequence, 0); + __pyx_t_3 = PyTuple_GET_ITEM(sequence, 1); + __Pyx_INCREF(__pyx_t_2); + __Pyx_INCREF(__pyx_t_3); + #else + __pyx_t_2 = PySequence_ITEM(sequence, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 424, __pyx_L1_error) + 
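+ /* _unellipsify returns a (have_slices, index) pair; the size check just
+  * above has already raised if the result were ever malformed. */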
__Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PySequence_ITEM(sequence, 1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 424, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } else { + __Pyx_RaiseNoneNotIterableError(); __PYX_ERR(1, 424, __pyx_L1_error) + } + __pyx_v_have_slices = __pyx_t_2; + __pyx_t_2 = 0; + __Pyx_DECREF_SET(__pyx_v_index, __pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":426 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_have_slices); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(1, 426, __pyx_L1_error) + if (__pyx_t_4) { + + /* "View.MemoryView":427 + * + * if have_slices: + * obj = self.is_slice(value) # <<<<<<<<<<<<<< + * if obj: + * self.setitem_slice_assignment(self[index], obj) + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->is_slice(__pyx_v_self, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 427, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_obj = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":428 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + __pyx_t_4 = __Pyx_PyObject_IsTrue(__pyx_v_obj); if (unlikely((__pyx_t_4 < 0))) __PYX_ERR(1, 428, __pyx_L1_error) + if (__pyx_t_4) { + + /* "View.MemoryView":429 + * obj = self.is_slice(value) + * if obj: + * self.setitem_slice_assignment(self[index], obj) # <<<<<<<<<<<<<< + * else: + * self.setitem_slice_assign_scalar(self[index], value) + */ + __pyx_t_1 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assignment(__pyx_v_self, __pyx_t_1, __pyx_v_obj); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 429, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "View.MemoryView":428 + * if have_slices: + * obj = self.is_slice(value) + * if obj: # <<<<<<<<<<<<<< + * self.setitem_slice_assignment(self[index], obj) + * else: + */ + goto __pyx_L5; + } + + /* "View.MemoryView":431 + * self.setitem_slice_assignment(self[index], obj) + * else: + * self.setitem_slice_assign_scalar(self[index], value) # <<<<<<<<<<<<<< + * else: + * self.setitem_indexed(index, value) + */ + /*else*/ { + __pyx_t_3 = __Pyx_PyObject_GetItem(((PyObject *)__pyx_v_self), __pyx_v_index); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 431, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + if (!(likely(((__pyx_t_3) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_3, __pyx_memoryview_type))))) __PYX_ERR(1, 431, __pyx_L1_error) + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_slice_assign_scalar(__pyx_v_self, ((struct __pyx_memoryview_obj *)__pyx_t_3), __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 431, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } + __pyx_L5:; + + /* "View.MemoryView":426 + * have_slices, index = _unellipsify(index, self.view.ndim) + * + * if have_slices: # <<<<<<<<<<<<<< + * obj = self.is_slice(value) + * if obj: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":433 + * self.setitem_slice_assign_scalar(self[index], value) + * else: 
+ * self.setitem_indexed(index, value) # <<<<<<<<<<<<<< + * + * cdef is_slice(self, obj): + */ + /*else*/ { + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->setitem_indexed(__pyx_v_self, __pyx_v_index, __pyx_v_value); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 433, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } + __pyx_L4:; + + /* "View.MemoryView":420 + * return self.convert_item_to_object(itemp) + * + * def __setitem__(memoryview self, object index, object value): # <<<<<<<<<<<<<< + * if self.view.readonly: + * raise TypeError, "Cannot assign to read-only memoryview" + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.__setitem__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_have_slices); + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":435 + * self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + +static PyObject *__pyx_memoryview_is_slice(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_obj) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_slice", 0); + __Pyx_INCREF(__pyx_v_obj); + + /* "View.MemoryView":436 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_obj, __pyx_memoryview_type); + __pyx_t_2 = (!__pyx_t_1); + if (__pyx_t_2) { + + /* "View.MemoryView":437 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_4, &__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_5); + /*try:*/ { + + /* "View.MemoryView":438 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, # <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_6 = __Pyx_PyInt_From_int(((__pyx_v_self->flags & (~PyBUF_WRITABLE)) | PyBUF_ANY_CONTIGUOUS)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 438, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_6); + + /* "View.MemoryView":439 + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) # <<<<<<<<<<<<<< + * except TypeError: + * return None + */ + __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_self->dtype_is_object); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 439, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + + /* "View.MemoryView":438 + * if not isinstance(obj, memoryview): + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, 
# <<<<<<<<<<<<<< + * self.dtype_is_object) + * except TypeError: + */ + __pyx_t_8 = PyTuple_New(3); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 438, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_INCREF(__pyx_v_obj); + __Pyx_GIVEREF(__pyx_v_obj); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 0, __pyx_v_obj)) __PYX_ERR(1, 438, __pyx_L4_error); + __Pyx_GIVEREF(__pyx_t_6); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 1, __pyx_t_6)) __PYX_ERR(1, 438, __pyx_L4_error); + __Pyx_GIVEREF(__pyx_t_7); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_8, 2, __pyx_t_7)) __PYX_ERR(1, 438, __pyx_L4_error); + __pyx_t_6 = 0; + __pyx_t_7 = 0; + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_8, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 438, __pyx_L4_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __Pyx_DECREF_SET(__pyx_v_obj, __pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":437 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + } + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + goto __pyx_L9_try_end; + __pyx_L4_error:; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_XDECREF(__pyx_t_8); __pyx_t_8 = 0; + + /* "View.MemoryView":440 + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + * except TypeError: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_9 = __Pyx_PyErr_ExceptionMatches(__pyx_builtin_TypeError); + if (__pyx_t_9) { + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_6) < 0) __PYX_ERR(1, 440, __pyx_L6_except_error) + __Pyx_XGOTREF(__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_6); + + /* "View.MemoryView":441 + * self.dtype_is_object) + * except TypeError: + * return None # <<<<<<<<<<<<<< + * + * return obj + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L7_except_return; + } + goto __pyx_L6_except_error; + + /* "View.MemoryView":437 + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): + * try: # <<<<<<<<<<<<<< + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + * self.dtype_is_object) + */ + __pyx_L6_except_error:; + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L1_error; + __pyx_L7_except_return:; + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_XGIVEREF(__pyx_t_5); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_4, __pyx_t_5); + goto __pyx_L0; + __pyx_L9_try_end:; + } + + /* "View.MemoryView":436 + * + * cdef is_slice(self, obj): + * if not isinstance(obj, memoryview): # <<<<<<<<<<<<<< + * try: + * obj = memoryview(obj, self.flags & ~PyBUF_WRITABLE | PyBUF_ANY_CONTIGUOUS, + */ + } + + /* "View.MemoryView":443 + * return None + * + * return obj # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assignment(self, dst, src): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_obj); + __pyx_r = __pyx_v_obj; + goto __pyx_L0; + + /* "View.MemoryView":435 + * 
self.setitem_indexed(index, value) + * + * cdef is_slice(self, obj): # <<<<<<<<<<<<<< + * if not isinstance(obj, memoryview): + * try: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_obj); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":445 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + +static PyObject *__pyx_memoryview_setitem_slice_assignment(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_dst, PyObject *__pyx_v_src) { + __Pyx_memviewslice __pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_src_slice; + __Pyx_memviewslice __pyx_v_msrc; + __Pyx_memviewslice __pyx_v_mdst; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assignment", 0); + + /* "View.MemoryView":448 + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + * cdef __Pyx_memviewslice msrc = get_slice_from_memview(src, &src_slice)[0] # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0] + * + */ + if (!(likely(((__pyx_v_src) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_src, __pyx_memoryview_type))))) __PYX_ERR(1, 448, __pyx_L1_error) + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_src), (&__pyx_v_src_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 448, __pyx_L1_error) + __pyx_v_msrc = (__pyx_t_1[0]); + + /* "View.MemoryView":449 + * cdef __Pyx_memviewslice src_slice + * cdef __Pyx_memviewslice msrc = get_slice_from_memview(src, &src_slice)[0] + * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0] # <<<<<<<<<<<<<< + * + * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) + */ + if (!(likely(((__pyx_v_dst) == Py_None) || likely(__Pyx_TypeTest(__pyx_v_dst, __pyx_memoryview_type))))) __PYX_ERR(1, 449, __pyx_L1_error) + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(((struct __pyx_memoryview_obj *)__pyx_v_dst), (&__pyx_v_dst_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 449, __pyx_L1_error) + __pyx_v_mdst = (__pyx_t_1[0]); + + /* "View.MemoryView":451 + * cdef __Pyx_memviewslice mdst = get_slice_from_memview(dst, &dst_slice)[0] + * + * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) # <<<<<<<<<<<<<< + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + */ + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_src, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 451, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_3 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 451, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_v_dst, __pyx_n_s_ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 451, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = 
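+ /* Same conversion as for src above: fetch dst.ndim as a Python attribute
+  * and coerce it to a C int before calling memoryview_copy_contents. */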
__Pyx_PyInt_As_int(__pyx_t_2); if (unlikely((__pyx_t_4 == (int)-1) && PyErr_Occurred())) __PYX_ERR(1, 451, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_5 = __pyx_memoryview_copy_contents(__pyx_v_msrc, __pyx_v_mdst, __pyx_t_3, __pyx_t_4, __pyx_v_self->dtype_is_object); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 451, __pyx_L1_error) + + /* "View.MemoryView":445 + * return obj + * + * cdef setitem_slice_assignment(self, dst, src): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice dst_slice + * cdef __Pyx_memviewslice src_slice + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assignment", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":453 + * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + +static PyObject *__pyx_memoryview_setitem_slice_assign_scalar(struct __pyx_memoryview_obj *__pyx_v_self, struct __pyx_memoryview_obj *__pyx_v_dst, PyObject *__pyx_v_value) { + int __pyx_v_array[0x80]; + void *__pyx_v_tmp; + void *__pyx_v_item; + __Pyx_memviewslice *__pyx_v_dst_slice; + __Pyx_memviewslice __pyx_v_tmp_slice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + int __pyx_t_5; + char const *__pyx_t_6; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + PyObject *__pyx_t_9 = NULL; + PyObject *__pyx_t_10 = NULL; + PyObject *__pyx_t_11 = NULL; + PyObject *__pyx_t_12 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_slice_assign_scalar", 0); + + /* "View.MemoryView":455 + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): + * cdef int array[128] + * cdef void *tmp = NULL # <<<<<<<<<<<<<< + * cdef void *item + * + */ + __pyx_v_tmp = NULL; + + /* "View.MemoryView":460 + * cdef __Pyx_memviewslice *dst_slice + * cdef __Pyx_memviewslice tmp_slice + * dst_slice = get_slice_from_memview(dst, &tmp_slice) # <<<<<<<<<<<<<< + * + * if self.view.itemsize > sizeof(array): + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_dst, (&__pyx_v_tmp_slice)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 460, __pyx_L1_error) + __pyx_v_dst_slice = __pyx_t_1; + + /* "View.MemoryView":462 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + __pyx_t_2 = (((size_t)__pyx_v_self->view.itemsize) > (sizeof(__pyx_v_array))); + if (__pyx_t_2) { + + /* "View.MemoryView":463 + * + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) # <<<<<<<<<<<<<< + * if tmp == NULL: + * raise MemoryError + */ + __pyx_v_tmp = PyMem_Malloc(__pyx_v_self->view.itemsize); + + /* "View.MemoryView":464 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + __pyx_t_2 = (__pyx_v_tmp == NULL); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":465 + * 
tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + * raise MemoryError # <<<<<<<<<<<<<< + * item = tmp + * else: + */ + PyErr_NoMemory(); __PYX_ERR(1, 465, __pyx_L1_error) + + /* "View.MemoryView":464 + * if self.view.itemsize > sizeof(array): + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: # <<<<<<<<<<<<<< + * raise MemoryError + * item = tmp + */ + } + + /* "View.MemoryView":466 + * if tmp == NULL: + * raise MemoryError + * item = tmp # <<<<<<<<<<<<<< + * else: + * item = array + */ + __pyx_v_item = __pyx_v_tmp; + + /* "View.MemoryView":462 + * dst_slice = get_slice_from_memview(dst, &tmp_slice) + * + * if self.view.itemsize > sizeof(array): # <<<<<<<<<<<<<< + * tmp = PyMem_Malloc(self.view.itemsize) + * if tmp == NULL: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":468 + * item = tmp + * else: + * item = array # <<<<<<<<<<<<<< + * + * try: + */ + /*else*/ { + __pyx_v_item = ((void *)__pyx_v_array); + } + __pyx_L3:; + + /* "View.MemoryView":470 + * item = array + * + * try: # <<<<<<<<<<<<<< + * if self.dtype_is_object: + * ( item)[0] = value + */ + /*try:*/ { + + /* "View.MemoryView":471 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * ( item)[0] = value + * else: + */ + if (__pyx_v_self->dtype_is_object) { + + /* "View.MemoryView":472 + * try: + * if self.dtype_is_object: + * ( item)[0] = value # <<<<<<<<<<<<<< + * else: + * self.assign_item_from_object( item, value) + */ + (((PyObject **)__pyx_v_item)[0]) = ((PyObject *)__pyx_v_value); + + /* "View.MemoryView":471 + * + * try: + * if self.dtype_is_object: # <<<<<<<<<<<<<< + * ( item)[0] = value + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":474 + * ( item)[0] = value + * else: + * self.assign_item_from_object( item, value) # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __pyx_t_3 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, ((char *)__pyx_v_item), __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 474, __pyx_L6_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L8:; + + /* "View.MemoryView":478 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + __pyx_t_2 = (__pyx_v_self->view.suboffsets != NULL); + if (__pyx_t_2) { + + /* "View.MemoryView":479 + * + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) # <<<<<<<<<<<<<< + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + * item, self.dtype_is_object) + */ + __pyx_t_4 = assert_direct_dimensions(__pyx_v_self->view.suboffsets, __pyx_v_self->view.ndim); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 479, __pyx_L6_error) + + /* "View.MemoryView":478 + * + * + * if self.view.suboffsets != NULL: # <<<<<<<<<<<<<< + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, + */ + } + + /* "View.MemoryView":480 + * if self.view.suboffsets != NULL: + * assert_direct_dimensions(self.view.suboffsets, self.view.ndim) + * slice_assign_scalar(dst_slice, dst.view.ndim, self.view.itemsize, # <<<<<<<<<<<<<< + * item, self.dtype_is_object) + * finally: + */ + __pyx_memoryview_slice_assign_scalar(__pyx_v_dst_slice, __pyx_v_dst->view.ndim, __pyx_v_self->view.itemsize, __pyx_v_item, __pyx_v_self->dtype_is_object); + } + + /* "View.MemoryView":483 + * item, 
self.dtype_is_object) + * finally: + * PyMem_Free(tmp) # <<<<<<<<<<<<<< + * + * cdef setitem_indexed(self, index, value): + */ + /*finally:*/ { + /*normal exit:*/{ + PyMem_Free(__pyx_v_tmp); + goto __pyx_L7; + } + __pyx_L6_error:; + /*exception exit:*/{ + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + if (PY_MAJOR_VERSION >= 3) __Pyx_ExceptionSwap(&__pyx_t_10, &__pyx_t_11, &__pyx_t_12); + if ((PY_MAJOR_VERSION < 3) || unlikely(__Pyx_GetException(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9) < 0)) __Pyx_ErrFetch(&__pyx_t_7, &__pyx_t_8, &__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_7); + __Pyx_XGOTREF(__pyx_t_8); + __Pyx_XGOTREF(__pyx_t_9); + __Pyx_XGOTREF(__pyx_t_10); + __Pyx_XGOTREF(__pyx_t_11); + __Pyx_XGOTREF(__pyx_t_12); + __pyx_t_4 = __pyx_lineno; __pyx_t_5 = __pyx_clineno; __pyx_t_6 = __pyx_filename; + { + PyMem_Free(__pyx_v_tmp); + } + if (PY_MAJOR_VERSION >= 3) { + __Pyx_XGIVEREF(__pyx_t_10); + __Pyx_XGIVEREF(__pyx_t_11); + __Pyx_XGIVEREF(__pyx_t_12); + __Pyx_ExceptionReset(__pyx_t_10, __pyx_t_11, __pyx_t_12); + } + __Pyx_XGIVEREF(__pyx_t_7); + __Pyx_XGIVEREF(__pyx_t_8); + __Pyx_XGIVEREF(__pyx_t_9); + __Pyx_ErrRestore(__pyx_t_7, __pyx_t_8, __pyx_t_9); + __pyx_t_7 = 0; __pyx_t_8 = 0; __pyx_t_9 = 0; __pyx_t_10 = 0; __pyx_t_11 = 0; __pyx_t_12 = 0; + __pyx_lineno = __pyx_t_4; __pyx_clineno = __pyx_t_5; __pyx_filename = __pyx_t_6; + goto __pyx_L1_error; + } + __pyx_L7:; + } + + /* "View.MemoryView":453 + * memoryview_copy_contents(msrc, mdst, src.ndim, dst.ndim, self.dtype_is_object) + * + * cdef setitem_slice_assign_scalar(self, memoryview dst, value): # <<<<<<<<<<<<<< + * cdef int array[128] + * cdef void *tmp = NULL + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_slice_assign_scalar", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":485 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + +static PyObject *__pyx_memoryview_setitem_indexed(struct __pyx_memoryview_obj *__pyx_v_self, PyObject *__pyx_v_index, PyObject *__pyx_v_value) { + char *__pyx_v_itemp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + char *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("setitem_indexed", 0); + + /* "View.MemoryView":486 + * + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) # <<<<<<<<<<<<<< + * self.assign_item_from_object(itemp, value) + * + */ + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->get_item_pointer(__pyx_v_self, __pyx_v_index); if (unlikely(__pyx_t_1 == ((char *)NULL))) __PYX_ERR(1, 486, __pyx_L1_error) + __pyx_v_itemp = __pyx_t_1; + + /* "View.MemoryView":487 + * cdef setitem_indexed(self, index, value): + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview 
*)__pyx_v_self->__pyx_vtab)->assign_item_from_object(__pyx_v_self, __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 487, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":485 + * PyMem_Free(tmp) + * + * cdef setitem_indexed(self, index, value): # <<<<<<<<<<<<<< + * cdef char *itemp = self.get_item_pointer(index) + * self.assign_item_from_object(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.setitem_indexed", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":489 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + +static PyObject *__pyx_memoryview_convert_item_to_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_v_struct = NULL; + PyObject *__pyx_v_bytesitem = 0; + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + Py_ssize_t __pyx_t_9; + int __pyx_t_10; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":492 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef bytes bytesitem + * + */ + __pyx_t_1 = __Pyx_ImportDottedModule(__pyx_n_s_struct, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 492, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":495 + * cdef bytes bytesitem + * + * bytesitem = itemp[:self.view.itemsize] # <<<<<<<<<<<<<< + * try: + * result = struct.unpack(self.view.format, bytesitem) + */ + __pyx_t_1 = __Pyx_PyBytes_FromStringAndSize(__pyx_v_itemp + 0, __pyx_v_self->view.itemsize - 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 495, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_bytesitem = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":496 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_2, &__pyx_t_3, &__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_4); + /*try:*/ { + + /* "View.MemoryView":497 + * bytesitem = itemp[:self.view.itemsize] + * try: + * result = struct.unpack(self.view.format, bytesitem) # <<<<<<<<<<<<<< + * except struct.error: + * raise ValueError, "Unable to convert item to object" + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_unpack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 497, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 497, __pyx_L3_error) 
+ __Pyx_GOTREF(__pyx_t_6); + __pyx_t_7 = NULL; + __pyx_t_8 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + __pyx_t_8 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_7, __pyx_t_6, __pyx_v_bytesitem}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_8, 2+__pyx_t_8); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 497, __pyx_L3_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + __pyx_v_result = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":496 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + } + + /* "View.MemoryView":501 + * raise ValueError, "Unable to convert item to object" + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + /*else:*/ { + __pyx_t_9 = __Pyx_ssize_strlen(__pyx_v_self->view.format); if (unlikely(__pyx_t_9 == ((Py_ssize_t)-1))) __PYX_ERR(1, 501, __pyx_L5_except_error) + __pyx_t_10 = (__pyx_t_9 == 1); + if (__pyx_t_10) { + + /* "View.MemoryView":502 + * else: + * if len(self.view.format) == 1: + * return result[0] # <<<<<<<<<<<<<< + * return result + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_result, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 502, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L6_except_return; + + /* "View.MemoryView":501 + * raise ValueError, "Unable to convert item to object" + * else: + * if len(self.view.format) == 1: # <<<<<<<<<<<<<< + * return result[0] + * return result + */ + } + + /* "View.MemoryView":503 + * if len(self.view.format) == 1: + * return result[0] + * return result # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_result); + __pyx_r = __pyx_v_result; + goto __pyx_L6_except_return; + } + __pyx_L3_error:; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":498 + * try: + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: # <<<<<<<<<<<<<< + * raise ValueError, "Unable to convert item to object" + * else: + */ + __Pyx_ErrFetch(&__pyx_t_1, &__pyx_t_5, &__pyx_t_6); + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_error); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 498, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_8 = __Pyx_PyErr_GivenExceptionMatches(__pyx_t_1, __pyx_t_7); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_ErrRestore(__pyx_t_1, __pyx_t_5, __pyx_t_6); + __pyx_t_1 = 0; __pyx_t_5 = 0; __pyx_t_6 = 0; + if (__pyx_t_8) { + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_6, &__pyx_t_5, &__pyx_t_1) < 0) __PYX_ERR(1, 498, __pyx_L5_except_error) + __Pyx_XGOTREF(__pyx_t_6); + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_1); + + /* "View.MemoryView":499 
+ * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + * raise ValueError, "Unable to convert item to object" # <<<<<<<<<<<<<< + * else: + * if len(self.view.format) == 1: + */ + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Unable_to_convert_item_to_object, 0, 0); + __PYX_ERR(1, 499, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + + /* "View.MemoryView":496 + * + * bytesitem = itemp[:self.view.itemsize] + * try: # <<<<<<<<<<<<<< + * result = struct.unpack(self.view.format, bytesitem) + * except struct.error: + */ + __pyx_L5_except_error:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L1_error; + __pyx_L6_except_return:; + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_4); + __Pyx_ExceptionReset(__pyx_t_2, __pyx_t_3, __pyx_t_4); + goto __pyx_L0; + } + + /* "View.MemoryView":489 + * self.assign_item_from_object(itemp, value) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("View.MemoryView.memoryview.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesitem); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":505 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + +static PyObject *__pyx_memoryview_assign_item_from_object(struct __pyx_memoryview_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_v_struct = NULL; + char __pyx_v_c; + PyObject *__pyx_v_bytesvalue = 0; + Py_ssize_t __pyx_v_i; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + Py_ssize_t __pyx_t_7; + PyObject *__pyx_t_8 = NULL; + char *__pyx_t_9; + char *__pyx_t_10; + char *__pyx_t_11; + char *__pyx_t_12; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":508 + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + * import struct # <<<<<<<<<<<<<< + * cdef char c + * cdef bytes bytesvalue + */ + __pyx_t_1 = __Pyx_ImportDottedModule(__pyx_n_s_struct, NULL); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 508, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_v_struct = __pyx_t_1; + __pyx_t_1 = 0; + + /* "View.MemoryView":513 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + __pyx_t_2 = PyTuple_Check(__pyx_v_value); + if (__pyx_t_2) { + + /* "View.MemoryView":514 + * + * if isinstance(value, tuple): + * bytesvalue = struct.pack(self.view.format, *value) # <<<<<<<<<<<<<< + * else: + * bytesvalue = 
struct.pack(self.view.format, value) + */ + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 514, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = PyTuple_New(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 514, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PySequence_Tuple(__pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = PyNumber_Add(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 514, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_PyObject_Call(__pyx_t_1, __pyx_t_5, NULL); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 514, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (!(likely(PyBytes_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_3))) __PYX_ERR(1, 514, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":513 + * cdef Py_ssize_t i + * + * if isinstance(value, tuple): # <<<<<<<<<<<<<< + * bytesvalue = struct.pack(self.view.format, *value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":516 + * bytesvalue = struct.pack(self.view.format, *value) + * else: + * bytesvalue = struct.pack(self.view.format, value) # <<<<<<<<<<<<<< + * + * for i, c in enumerate(bytesvalue): + */ + /*else*/ { + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_v_struct, __pyx_n_s_pack); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 516, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_1 = __Pyx_PyBytes_FromString(__pyx_v_self->view.format); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 516, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = NULL; + __pyx_t_6 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_5))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_5); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_5); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_5, function); + __pyx_t_6 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_4, __pyx_t_1, __pyx_v_value}; + __pyx_t_3 = __Pyx_PyObject_FastCall(__pyx_t_5, __pyx_callargs+1-__pyx_t_6, 2+__pyx_t_6); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 516, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + if (!(likely(PyBytes_CheckExact(__pyx_t_3))||((__pyx_t_3) == Py_None) || __Pyx_RaiseUnexpectedTypeError("bytes", __pyx_t_3))) __PYX_ERR(1, 516, __pyx_L1_error) + __pyx_v_bytesvalue = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":518 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_7 = 0; + if (unlikely(__pyx_v_bytesvalue == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' is not iterable"); + __PYX_ERR(1, 518, __pyx_L1_error) + } + __Pyx_INCREF(__pyx_v_bytesvalue); + __pyx_t_8 = 
__pyx_v_bytesvalue; + __pyx_t_10 = PyBytes_AS_STRING(__pyx_t_8); + __pyx_t_11 = (__pyx_t_10 + PyBytes_GET_SIZE(__pyx_t_8)); + for (__pyx_t_12 = __pyx_t_10; __pyx_t_12 < __pyx_t_11; __pyx_t_12++) { + __pyx_t_9 = __pyx_t_12; + __pyx_v_c = (__pyx_t_9[0]); + + /* "View.MemoryView":519 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + __pyx_v_i = __pyx_t_7; + + /* "View.MemoryView":518 + * bytesvalue = struct.pack(self.view.format, value) + * + * for i, c in enumerate(bytesvalue): # <<<<<<<<<<<<<< + * itemp[i] = c + * + */ + __pyx_t_7 = (__pyx_t_7 + 1); + + /* "View.MemoryView":519 + * + * for i, c in enumerate(bytesvalue): + * itemp[i] = c # <<<<<<<<<<<<<< + * + * @cname('getbuffer') + */ + (__pyx_v_itemp[__pyx_v_i]) = __pyx_v_c; + } + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + + /* "View.MemoryView":505 + * return result + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * """Only used if instantiated manually by the user, or if Cython doesn't + * know how to convert the type""" + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.memoryview.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_struct); + __Pyx_XDECREF(__pyx_v_bytesvalue); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":521 + * itemp[i] = c + * + * @cname('getbuffer') # <<<<<<<<<<<<<< + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + */ + +/* Python wrapper */ +CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags); /*proto*/ +CYTHON_UNUSED static int __pyx_memoryview_getbuffer(PyObject *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__getbuffer__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(((struct __pyx_memoryview_obj *)__pyx_v_self), ((Py_buffer *)__pyx_v_info), ((int)__pyx_v_flags)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static int __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_8__getbuffer__(struct __pyx_memoryview_obj *__pyx_v_self, Py_buffer *__pyx_v_info, int __pyx_v_flags) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + Py_ssize_t *__pyx_t_3; + char *__pyx_t_4; + void *__pyx_t_5; + int __pyx_t_6; + Py_ssize_t __pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + if (unlikely(__pyx_v_info == NULL)) { + PyErr_SetString(PyExc_BufferError, "PyObject_GetBuffer: view==NULL argument is obsolete"); + return -1; + } + __Pyx_RefNannySetupContext("__getbuffer__", 0); + __pyx_v_info->obj = Py_None; __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(__pyx_v_info->obj); + + /* "View.MemoryView":523 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise 
ValueError, "Cannot create writable memory view from read-only memoryview" + * + */ + __pyx_t_2 = ((__pyx_v_flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_1 = __pyx_v_self->view.readonly; + __pyx_L4_bool_binop_done:; + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":524 + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + * raise ValueError, "Cannot create writable memory view from read-only memoryview" # <<<<<<<<<<<<<< + * + * if flags & PyBUF_ND: + */ + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Cannot_create_writable_memory_vi, 0, 0); + __PYX_ERR(1, 524, __pyx_L1_error) + + /* "View.MemoryView":523 + * @cname('getbuffer') + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: # <<<<<<<<<<<<<< + * raise ValueError, "Cannot create writable memory view from read-only memoryview" + * + */ + } + + /* "View.MemoryView":526 + * raise ValueError, "Cannot create writable memory view from read-only memoryview" + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_ND) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":527 + * + * if flags & PyBUF_ND: + * info.shape = self.view.shape # <<<<<<<<<<<<<< + * else: + * info.shape = NULL + */ + __pyx_t_3 = __pyx_v_self->view.shape; + __pyx_v_info->shape = __pyx_t_3; + + /* "View.MemoryView":526 + * raise ValueError, "Cannot create writable memory view from read-only memoryview" + * + * if flags & PyBUF_ND: # <<<<<<<<<<<<<< + * info.shape = self.view.shape + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":529 + * info.shape = self.view.shape + * else: + * info.shape = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_STRIDES: + */ + /*else*/ { + __pyx_v_info->shape = NULL; + } + __pyx_L6:; + + /* "View.MemoryView":531 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_STRIDES) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":532 + * + * if flags & PyBUF_STRIDES: + * info.strides = self.view.strides # <<<<<<<<<<<<<< + * else: + * info.strides = NULL + */ + __pyx_t_3 = __pyx_v_self->view.strides; + __pyx_v_info->strides = __pyx_t_3; + + /* "View.MemoryView":531 + * info.shape = NULL + * + * if flags & PyBUF_STRIDES: # <<<<<<<<<<<<<< + * info.strides = self.view.strides + * else: + */ + goto __pyx_L7; + } + + /* "View.MemoryView":534 + * info.strides = self.view.strides + * else: + * info.strides = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_INDIRECT: + */ + /*else*/ { + __pyx_v_info->strides = NULL; + } + __pyx_L7:; + + /* "View.MemoryView":536 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_INDIRECT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":537 + * + * if flags & PyBUF_INDIRECT: + * info.suboffsets = self.view.suboffsets # <<<<<<<<<<<<<< + * else: + * info.suboffsets = NULL + */ + __pyx_t_3 = __pyx_v_self->view.suboffsets; + __pyx_v_info->suboffsets = __pyx_t_3; + + /* "View.MemoryView":536 + * info.strides = NULL + * + * if flags & PyBUF_INDIRECT: # <<<<<<<<<<<<<< + * info.suboffsets = self.view.suboffsets + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":539 + * info.suboffsets = self.view.suboffsets + * 
else: + * info.suboffsets = NULL # <<<<<<<<<<<<<< + * + * if flags & PyBUF_FORMAT: + */ + /*else*/ { + __pyx_v_info->suboffsets = NULL; + } + __pyx_L8:; + + /* "View.MemoryView":541 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + __pyx_t_1 = ((__pyx_v_flags & PyBUF_FORMAT) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":542 + * + * if flags & PyBUF_FORMAT: + * info.format = self.view.format # <<<<<<<<<<<<<< + * else: + * info.format = NULL + */ + __pyx_t_4 = __pyx_v_self->view.format; + __pyx_v_info->format = __pyx_t_4; + + /* "View.MemoryView":541 + * info.suboffsets = NULL + * + * if flags & PyBUF_FORMAT: # <<<<<<<<<<<<<< + * info.format = self.view.format + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":544 + * info.format = self.view.format + * else: + * info.format = NULL # <<<<<<<<<<<<<< + * + * info.buf = self.view.buf + */ + /*else*/ { + __pyx_v_info->format = NULL; + } + __pyx_L9:; + + /* "View.MemoryView":546 + * info.format = NULL + * + * info.buf = self.view.buf # <<<<<<<<<<<<<< + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + */ + __pyx_t_5 = __pyx_v_self->view.buf; + __pyx_v_info->buf = __pyx_t_5; + + /* "View.MemoryView":547 + * + * info.buf = self.view.buf + * info.ndim = self.view.ndim # <<<<<<<<<<<<<< + * info.itemsize = self.view.itemsize + * info.len = self.view.len + */ + __pyx_t_6 = __pyx_v_self->view.ndim; + __pyx_v_info->ndim = __pyx_t_6; + + /* "View.MemoryView":548 + * info.buf = self.view.buf + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize # <<<<<<<<<<<<<< + * info.len = self.view.len + * info.readonly = self.view.readonly + */ + __pyx_t_7 = __pyx_v_self->view.itemsize; + __pyx_v_info->itemsize = __pyx_t_7; + + /* "View.MemoryView":549 + * info.ndim = self.view.ndim + * info.itemsize = self.view.itemsize + * info.len = self.view.len # <<<<<<<<<<<<<< + * info.readonly = self.view.readonly + * info.obj = self + */ + __pyx_t_7 = __pyx_v_self->view.len; + __pyx_v_info->len = __pyx_t_7; + + /* "View.MemoryView":550 + * info.itemsize = self.view.itemsize + * info.len = self.view.len + * info.readonly = self.view.readonly # <<<<<<<<<<<<<< + * info.obj = self + * + */ + __pyx_t_1 = __pyx_v_self->view.readonly; + __pyx_v_info->readonly = __pyx_t_1; + + /* "View.MemoryView":551 + * info.len = self.view.len + * info.readonly = self.view.readonly + * info.obj = self # <<<<<<<<<<<<<< + * + * + */ + __Pyx_INCREF((PyObject *)__pyx_v_self); + __Pyx_GIVEREF((PyObject *)__pyx_v_self); + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); + __pyx_v_info->obj = ((PyObject *)__pyx_v_self); + + /* "View.MemoryView":521 + * itemp[i] = c + * + * @cname('getbuffer') # <<<<<<<<<<<<<< + * def __getbuffer__(self, Py_buffer *info, int flags): + * if flags & PyBUF_WRITABLE and self.view.readonly: + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__getbuffer__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + if (__pyx_v_info->obj != NULL) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + goto __pyx_L2; + __pyx_L0:; + if (__pyx_v_info->obj == Py_None) { + __Pyx_GOTREF(__pyx_v_info->obj); + __Pyx_DECREF(__pyx_v_info->obj); __pyx_v_info->obj = 0; + } + __pyx_L2:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":554 + * + * + * @property # <<<<<<<<<<<<<< + * def 
T(self): + * cdef _memoryviewslice result = memoryview_copy(self) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_1T___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":556 + * @property + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) # <<<<<<<<<<<<<< + * transpose_memslice(&result.from_slice) + * return result + */ + __pyx_t_1 = __pyx_memoryview_copy_object(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 556, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + if (!(likely(((__pyx_t_1) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_1, __pyx_memoryviewslice_type))))) __PYX_ERR(1, 556, __pyx_L1_error) + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":557 + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_t_2 = __pyx_memslice_transpose((&__pyx_v_result->from_slice)); if (unlikely(__pyx_t_2 == ((int)-1))) __PYX_ERR(1, 557, __pyx_L1_error) + + /* "View.MemoryView":558 + * cdef _memoryviewslice result = memoryview_copy(self) + * transpose_memslice(&result.from_slice) + * return result # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF((PyObject *)__pyx_v_result); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":554 + * + * + * @property # <<<<<<<<<<<<<< + * def T(self): + * cdef _memoryviewslice result = memoryview_copy(self) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.T.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":560 + * return result + * + * @property # <<<<<<<<<<<<<< + * def base(self): + * return self._get_base() + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(((struct __pyx_memoryview_obj 
*)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4base___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":562 + * @property + * def base(self): + * return self._get_base() # <<<<<<<<<<<<<< + * + * cdef _get_base(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = ((struct __pyx_vtabstruct_memoryview *)__pyx_v_self->__pyx_vtab)->_get_base(__pyx_v_self); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 562, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":560 + * return result + * + * @property # <<<<<<<<<<<<<< + * def base(self): + * return self._get_base() + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.base.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":564 + * return self._get_base() + * + * cdef _get_base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + +static PyObject *__pyx_memoryview__get_base(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("_get_base", 0); + + /* "View.MemoryView":565 + * + * cdef _get_base(self): + * return self.obj # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->obj); + __pyx_r = __pyx_v_self->obj; + goto __pyx_L0; + + /* "View.MemoryView":564 + * return self._get_base() + * + * cdef _get_base(self): # <<<<<<<<<<<<<< + * return self.obj + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":567 + * return self.obj + * + * @property # <<<<<<<<<<<<<< + * def shape(self): + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_5shape___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_7genexpr__pyx_v_length; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":569 + * @property + * def shape(self): + * return tuple([length for length in 
self.view.shape[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + { /* enter inner scope */ + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 569, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_7genexpr__pyx_v_length = (__pyx_t_2[0]); + __pyx_t_5 = PyInt_FromSsize_t(__pyx_7genexpr__pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 569, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_5))) __PYX_ERR(1, 569, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + } + } /* exit inner scope */ + __pyx_t_5 = PyList_AsTuple(((PyObject*)__pyx_t_1)); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 569, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_r = __pyx_t_5; + __pyx_t_5 = 0; + goto __pyx_L0; + + /* "View.MemoryView":567 + * return self.obj + * + * @property # <<<<<<<<<<<<<< + * def shape(self): + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.shape.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":571 + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + * @property # <<<<<<<<<<<<<< + * def strides(self): + * if self.view.strides == NULL: + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_7strides___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_8genexpr1__pyx_v_stride; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":573 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError, "Buffer view does not expose strides" + */ + __pyx_t_1 = (__pyx_v_self->view.strides == NULL); + if (unlikely(__pyx_t_1)) { + + /* "View.MemoryView":575 + * if self.view.strides == NULL: + * + * raise ValueError, "Buffer view does not expose strides" # <<<<<<<<<<<<<< + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + */ + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Buffer_view_does_not_expose_stri, 0, 0); + __PYX_ERR(1, 575, 
__pyx_L1_error) + + /* "View.MemoryView":573 + * @property + * def strides(self): + * if self.view.strides == NULL: # <<<<<<<<<<<<<< + * + * raise ValueError, "Buffer view does not expose strides" + */ + } + + /* "View.MemoryView":577 + * raise ValueError, "Buffer view does not expose strides" + * + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + { /* enter inner scope */ + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = (__pyx_v_self->view.strides + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.strides; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_8genexpr1__pyx_v_stride = (__pyx_t_3[0]); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_8genexpr1__pyx_v_stride); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + } /* exit inner scope */ + __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 577, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_6; + __pyx_t_6 = 0; + goto __pyx_L0; + + /* "View.MemoryView":571 + * return tuple([length for length in self.view.shape[:self.view.ndim]]) + * + * @property # <<<<<<<<<<<<<< + * def strides(self): + * if self.view.strides == NULL: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.memoryview.strides.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":579 + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + * + * @property # <<<<<<<<<<<<<< + * def suboffsets(self): + * if self.view.suboffsets == NULL: + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_10suboffsets___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_8genexpr2__pyx_v_suboffset; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + Py_ssize_t *__pyx_t_5; + PyObject *__pyx_t_6 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":581 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + __pyx_t_1 = (__pyx_v_self->view.suboffsets == NULL); + if 
(__pyx_t_1) { + + /* "View.MemoryView":582 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PySequence_Multiply(__pyx_tuple__4, __pyx_v_self->view.ndim); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 582, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":581 + * @property + * def suboffsets(self): + * if self.view.suboffsets == NULL: # <<<<<<<<<<<<<< + * return (-1,) * self.view.ndim + * + */ + } + + /* "View.MemoryView":584 + * return (-1,) * self.view.ndim + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + { /* enter inner scope */ + __pyx_t_2 = PyList_New(0); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 584, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = (__pyx_v_self->view.suboffsets + __pyx_v_self->view.ndim); + for (__pyx_t_5 = __pyx_v_self->view.suboffsets; __pyx_t_5 < __pyx_t_4; __pyx_t_5++) { + __pyx_t_3 = __pyx_t_5; + __pyx_8genexpr2__pyx_v_suboffset = (__pyx_t_3[0]); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_8genexpr2__pyx_v_suboffset); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_2, (PyObject*)__pyx_t_6))) __PYX_ERR(1, 584, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + } /* exit inner scope */ + __pyx_t_6 = PyList_AsTuple(((PyObject*)__pyx_t_2)); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 584, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_6; + __pyx_t_6 = 0; + goto __pyx_L0; + + /* "View.MemoryView":579 + * return tuple([stride for stride in self.view.strides[:self.view.ndim]]) + * + * @property # <<<<<<<<<<<<<< + * def suboffsets(self): + * if self.view.suboffsets == NULL: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_AddTraceback("View.MemoryView.memoryview.suboffsets.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":586 + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + * + * @property # <<<<<<<<<<<<<< + * def ndim(self): + * return self.view.ndim + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4ndim___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + 
__Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":588 + * @property + * def ndim(self): + * return self.view.ndim # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_self->view.ndim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 588, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":586 + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + * + * @property # <<<<<<<<<<<<<< + * def ndim(self): + * return self.view.ndim + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.ndim.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":590 + * return self.view.ndim + * + * @property # <<<<<<<<<<<<<< + * def itemsize(self): + * return self.view.itemsize + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_8itemsize___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":592 + * @property + * def itemsize(self): + * return self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 592, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":590 + * return self.view.ndim + * + * @property # <<<<<<<<<<<<<< + * def itemsize(self): + * return self.view.itemsize + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview.itemsize.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":594 + * return self.view.itemsize + * + * @property # <<<<<<<<<<<<<< + * def nbytes(self): + * return self.size * self.view.itemsize + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = 
__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_6nbytes___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":596 + * @property + * def nbytes(self): + * return self.size * self.view.itemsize # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_size); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 596, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_self->view.itemsize); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 596, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 596, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_3; + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":594 + * return self.view.itemsize + * + * @property # <<<<<<<<<<<<<< + * def nbytes(self): + * return self.size * self.view.itemsize + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.nbytes.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":598 + * return self.size * self.view.itemsize + * + * @property # <<<<<<<<<<<<<< + * def size(self): + * if self._size is None: + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__get__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView_10memoryview_4size___get__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_v_result = NULL; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__get__", 0); + + /* "View.MemoryView":600 + * @property + * def size(self): + * if self._size is None: # <<<<<<<<<<<<<< + * result = 1 + * + */ + __pyx_t_1 = (__pyx_v_self->_size == Py_None); + if (__pyx_t_1) { + + /* "View.MemoryView":601 + * def size(self): + * if self._size is None: + * result = 1 # <<<<<<<<<<<<<< + * + * for length in 
self.view.shape[:self.view.ndim]: + */ + __Pyx_INCREF(__pyx_int_1); + __pyx_v_result = __pyx_int_1; + + /* "View.MemoryView":603 + * result = 1 + * + * for length in self.view.shape[:self.view.ndim]: # <<<<<<<<<<<<<< + * result *= length + * + */ + __pyx_t_3 = (__pyx_v_self->view.shape + __pyx_v_self->view.ndim); + for (__pyx_t_4 = __pyx_v_self->view.shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_t_5 = PyInt_FromSsize_t((__pyx_t_2[0])); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 603, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_5); + __pyx_t_5 = 0; + + /* "View.MemoryView":604 + * + * for length in self.view.shape[:self.view.ndim]: + * result *= length # <<<<<<<<<<<<<< + * + * self._size = result + */ + __pyx_t_5 = PyNumber_InPlaceMultiply(__pyx_v_result, __pyx_v_length); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 604, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF_SET(__pyx_v_result, __pyx_t_5); + __pyx_t_5 = 0; + } + + /* "View.MemoryView":606 + * result *= length + * + * self._size = result # <<<<<<<<<<<<<< + * + * return self._size + */ + __Pyx_INCREF(__pyx_v_result); + __Pyx_GIVEREF(__pyx_v_result); + __Pyx_GOTREF(__pyx_v_self->_size); + __Pyx_DECREF(__pyx_v_self->_size); + __pyx_v_self->_size = __pyx_v_result; + + /* "View.MemoryView":600 + * @property + * def size(self): + * if self._size is None: # <<<<<<<<<<<<<< + * result = 1 + * + */ + } + + /* "View.MemoryView":608 + * self._size = result + * + * return self._size # <<<<<<<<<<<<<< + * + * def __len__(self): + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->_size); + __pyx_r = __pyx_v_self->_size; + goto __pyx_L0; + + /* "View.MemoryView":598 + * return self.size * self.view.itemsize + * + * @property # <<<<<<<<<<<<<< + * def size(self): + * if self._size is None: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.memoryview.size.__get__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XDECREF(__pyx_v_length); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":610 + * return self._size + * + * def __len__(self): # <<<<<<<<<<<<<< + * if self.view.ndim >= 1: + * return self.view.shape[0] + */ + +/* Python wrapper */ +static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self); /*proto*/ +static Py_ssize_t __pyx_memoryview___len__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__len__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static Py_ssize_t __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_10__len__(struct __pyx_memoryview_obj *__pyx_v_self) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("__len__", 0); + + /* "View.MemoryView":611 + * + * def __len__(self): + * if self.view.ndim >= 1: # <<<<<<<<<<<<<< + * return self.view.shape[0] + * + */ + __pyx_t_1 = (__pyx_v_self->view.ndim >= 1); + if (__pyx_t_1) { + + /* "View.MemoryView":612 + * def __len__(self): + * if self.view.ndim >= 1: + * return self.view.shape[0] # 
<<<<<<<<<<<<<< + + * + * return 0 + */ + __pyx_r = (__pyx_v_self->view.shape[0]); + goto __pyx_L0; + + /* "View.MemoryView":611 + * + * def __len__(self): + * if self.view.ndim >= 1: # <<<<<<<<<<<<<< + * return self.view.shape[0] + * + */ + } + + /* "View.MemoryView":614 + * return self.view.shape[0] + * + * return 0 # <<<<<<<<<<<<<< + * + * def __repr__(self): + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":610 + * return self._size + * + * def __len__(self): # <<<<<<<<<<<<<< + * if self.view.ndim >= 1: + * return self.view.shape[0] + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":616 + * return 0 + * + * def __repr__(self): # <<<<<<<<<<<<<< + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, + * id(self)) + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_memoryview___repr__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__repr__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_12__repr__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__repr__", 0); + + /* "View.MemoryView":617 + * + * def __repr__(self): + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< + * id(self)) + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 617, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":618 + * def __repr__(self): + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, + * id(self)) # <<<<<<<<<<<<<< + * + * def __str__(self): + */ + __pyx_t_2 = __Pyx_PyObject_CallOneArg(__pyx_builtin_id, ((PyObject *)__pyx_v_self)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 618, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "View.MemoryView":617 + * + * def __repr__(self): + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, # <<<<<<<<<<<<<< + * id(self)) + * + */ + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 617, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1)) __PYX_ERR(1, 617, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_2)) __PYX_ERR(1, 617, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_t_3); if (unlikely(!__pyx_t_2)) 
__PYX_ERR(1, 617, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":616 + * return 0 + * + * def __repr__(self): # <<<<<<<<<<<<<< + * return "<MemoryView of %r at 0x%x>" % (self.base.__class__.__name__, + * id(self)) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview.__repr__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":620 + * id(self)) + * + * def __str__(self): # <<<<<<<<<<<<<< + * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) + * + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self); /*proto*/ +static PyObject *__pyx_memoryview___str__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__str__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_14__str__(struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__str__", 0); + + /* "View.MemoryView":621 + * + * def __str__(self): + * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v_self), __pyx_n_s_base); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyObject_GetAttrStr(__pyx_t_1, __pyx_n_s_class); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 621, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_name_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 621, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_MemoryView_of_r_object, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 621, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":620 + * id(self)) + * + * def __str__(self): # <<<<<<<<<<<<<< + * return "<MemoryView of %r object>" % (self.base.__class__.__name__,) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.__str__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":624 + * + * + * 
def is_c_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_memoryview_is_c_contig(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_c_contig (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 624, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + if (unlikely(__pyx_nargs > 0)) { + __Pyx_RaiseArgtupleInvalid("is_c_contig", 1, 0, 0, __pyx_nargs); return NULL;} + if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "is_c_contig", 0))) return NULL; + goto __pyx_L4_argument_unpacking_done; + goto __pyx_L3_error; + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_16is_c_contig(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice *__pyx_v_mslice; + __Pyx_memviewslice __pyx_v_tmp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_c_contig", 0); + + /* "View.MemoryView":627 + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 627, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":628 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'C', self.view.ndim) # <<<<<<<<<<<<<< + * + * def is_f_contig(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'C', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 628, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":624 + * + * + * def is_c_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * 
cdef __Pyx_memviewslice tmp + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_c_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":630 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_memoryview_is_f_contig(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_f_contig (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 630, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + if (unlikely(__pyx_nargs > 0)) { + __Pyx_RaiseArgtupleInvalid("is_f_contig", 1, 0, 0, __pyx_nargs); return NULL;} + if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "is_f_contig", 0))) return NULL; + goto __pyx_L4_argument_unpacking_done; + goto __pyx_L3_error; + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_18is_f_contig(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice *__pyx_v_mslice; + __Pyx_memviewslice __pyx_v_tmp; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice *__pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("is_f_contig", 0); + + /* "View.MemoryView":633 + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) # <<<<<<<<<<<<<< + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + */ + __pyx_t_1 = __pyx_memoryview_get_slice_from_memoryview(__pyx_v_self, (&__pyx_v_tmp)); if (unlikely(__pyx_t_1 == ((__Pyx_memviewslice *)NULL))) __PYX_ERR(1, 633, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":634 + * cdef __Pyx_memviewslice tmp + * mslice = get_slice_from_memview(self, &tmp) + * return slice_is_contig(mslice[0], 'F', self.view.ndim) # <<<<<<<<<<<<<< + * + * def copy(self): 
+ */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_memviewslice_is_contig((__pyx_v_mslice[0]), 'F', __pyx_v_self->view.ndim)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 634, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":630 + * return slice_is_contig(mslice[0], 'C', self.view.ndim) + * + * def is_f_contig(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice *mslice + * cdef __Pyx_memviewslice tmp + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.is_f_contig", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":636 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_memoryview_copy(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 636, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + if (unlikely(__pyx_nargs > 0)) { + __Pyx_RaiseArgtupleInvalid("copy", 1, 0, 0, __pyx_nargs); return NULL;} + if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "copy", 0))) return NULL; + goto __pyx_L4_argument_unpacking_done; + goto __pyx_L3_error; + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_20copy(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_mslice; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy", 0); + + /* "View.MemoryView":638 + * def copy(self): + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &mslice) + */ + __pyx_v_flags = (__pyx_v_self->flags & 
(~PyBUF_F_CONTIGUOUS)); + + /* "View.MemoryView":640 + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + * + * slice_copy(self, &mslice) # <<<<<<<<<<<<<< + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_mslice)); + + /* "View.MemoryView":641 + * + * slice_copy(self, &mslice) + * mslice = slice_copy_contig(&mslice, "c", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_C_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_mslice), ((char *)"c"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_C_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 641, __pyx_L1_error) + __pyx_v_mslice = __pyx_t_1; + + /* "View.MemoryView":646 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &mslice) # <<<<<<<<<<<<<< + * + * def copy_fortran(self): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_mslice)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 646, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":636 + * return slice_is_contig(mslice[0], 'F', self.view.ndim) + * + * def copy(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice mslice + * cdef int flags = self.flags & ~PyBUF_F_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":648 + * return memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + +/* Python wrapper */ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_memoryview_copy_fortran(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("copy_fortran (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 648, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + if (unlikely(__pyx_nargs > 0)) { + __Pyx_RaiseArgtupleInvalid("copy_fortran", 1, 0, 0, __pyx_nargs); return NULL;} + if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "copy_fortran", 0))) return NULL; + goto __pyx_L4_argument_unpacking_done; + goto __pyx_L3_error; + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, 
__pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_memoryview___pyx_pf_15View_dot_MemoryView_10memoryview_22copy_fortran(struct __pyx_memoryview_obj *__pyx_v_self) { + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + int __pyx_v_flags; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("copy_fortran", 0); + + /* "View.MemoryView":650 + * def copy_fortran(self): + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS # <<<<<<<<<<<<<< + * + * slice_copy(self, &src) + */ + __pyx_v_flags = (__pyx_v_self->flags & (~PyBUF_C_CONTIGUOUS)); + + /* "View.MemoryView":652 + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + * + * slice_copy(self, &src) # <<<<<<<<<<<<<< + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, + * self.view.itemsize, + */ + __pyx_memoryview_slice_copy(__pyx_v_self, (&__pyx_v_src)); + + /* "View.MemoryView":653 + * + * slice_copy(self, &src) + * dst = slice_copy_contig(&src, "fortran", self.view.ndim, # <<<<<<<<<<<<<< + * self.view.itemsize, + * flags|PyBUF_F_CONTIGUOUS, + */ + __pyx_t_1 = __pyx_memoryview_copy_new_contig((&__pyx_v_src), ((char *)"fortran"), __pyx_v_self->view.ndim, __pyx_v_self->view.itemsize, (__pyx_v_flags | PyBUF_F_CONTIGUOUS), __pyx_v_self->dtype_is_object); if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 653, __pyx_L1_error) + __pyx_v_dst = __pyx_t_1; + + /* "View.MemoryView":658 + * self.dtype_is_object) + * + * return memoryview_copy_from_slice(self, &dst) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_copy_object_from_slice(__pyx_v_self, (&__pyx_v_dst)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 658, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":648 + * return memoryview_copy_from_slice(self, &mslice) + * + * def copy_fortran(self): # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice src, dst + * cdef int flags = self.flags & ~PyBUF_C_CONTIGUOUS + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.memoryview.copy_fortran", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_1__reduce_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + #if !CYTHON_METH_FASTCALL + 
CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 1, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + if (unlikely(__pyx_nargs > 0)) { + __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;} + if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL; + goto __pyx_L4_argument_unpacking_done; + goto __pyx_L3_error; + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf___pyx_memoryview___reduce_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryview_3__setstate_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* 
values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 3, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v___pyx_state = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf___pyx_memoryview_2__setstate_cython__(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v___pyx_state); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryview_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryview_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< + */ + __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< 
+ * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.memoryview.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":662 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + +static PyObject *__pyx_memoryview_new(PyObject *__pyx_v_o, int __pyx_v_flags, int __pyx_v_dtype_is_object, __Pyx_TypeInfo *__pyx_v_typeinfo) { + struct __pyx_memoryview_obj *__pyx_v_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_cwrapper", 0); + + /* "View.MemoryView":663 + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) # <<<<<<<<<<<<<< + * result.typeinfo = typeinfo + * return result + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_flags); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 663, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 663, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 663, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_o); + __Pyx_GIVEREF(__pyx_v_o); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_o)) __PYX_ERR(1, 663, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_1)) __PYX_ERR(1, 663, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2)) __PYX_ERR(1, 663, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyObject_Call(((PyObject *)__pyx_memoryview_type), __pyx_t_3, NULL); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 663, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryview_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":664 + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo # <<<<<<<<<<<<<< + * return result + * + */ + __pyx_v_result->typeinfo = __pyx_v_typeinfo; + + /* "View.MemoryView":665 + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_check') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF((PyObject *)__pyx_v_result); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":662 + * + * @cname('__pyx_memoryview_new') + * cdef memoryview_cwrapper(object o, int flags, bint dtype_is_object, __Pyx_TypeInfo *typeinfo): # <<<<<<<<<<<<<< + * cdef memoryview result = memoryview(o, flags, dtype_is_object) + * result.typeinfo = typeinfo + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + 
__Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_cwrapper", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":668 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o) noexcept: # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + +static CYTHON_INLINE int __pyx_memoryview_check(PyObject *__pyx_v_o) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("memoryview_check", 0); + + /* "View.MemoryView":669 + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o) noexcept: + * return isinstance(o, memoryview) # <<<<<<<<<<<<<< + * + * cdef tuple _unellipsify(object index, int ndim): + */ + __pyx_t_1 = __Pyx_TypeCheck(__pyx_v_o, __pyx_memoryview_type); + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "View.MemoryView":668 + * + * @cname('__pyx_memoryview_check') + * cdef inline bint memoryview_check(object o) noexcept: # <<<<<<<<<<<<<< + * return isinstance(o, memoryview) + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":671 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + +static PyObject *_unellipsify(PyObject *__pyx_v_index, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_idx; + PyObject *__pyx_v_tup = NULL; + PyObject *__pyx_v_result = NULL; + int __pyx_v_have_slices; + int __pyx_v_seen_ellipsis; + PyObject *__pyx_v_item = NULL; + Py_ssize_t __pyx_v_nslices; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + Py_ssize_t __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_UCS4 __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("_unellipsify", 0); + + /* "View.MemoryView":677 + * """ + * cdef Py_ssize_t idx + * tup = index if isinstance(index, tuple) else (index,) # <<<<<<<<<<<<<< + * + * result = [slice(None)] * ndim + */ + __pyx_t_2 = PyTuple_Check(__pyx_v_index); + if (__pyx_t_2) { + __Pyx_INCREF(((PyObject*)__pyx_v_index)); + __pyx_t_1 = __pyx_v_index; + } else { + __pyx_t_3 = PyTuple_New(1); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 677, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(__pyx_v_index); + __Pyx_GIVEREF(__pyx_v_index); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_v_index)) __PYX_ERR(1, 677, __pyx_L1_error); + __pyx_t_1 = __pyx_t_3; + __pyx_t_3 = 0; + } + __pyx_v_tup = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":679 + * tup = index if isinstance(index, tuple) else (index,) + * + * result = [slice(None)] * ndim # <<<<<<<<<<<<<< + * have_slices = False + * seen_ellipsis = False + */ + __pyx_t_1 = PyList_New(1 * ((__pyx_v_ndim<0) ? 
0:__pyx_v_ndim)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + { Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < __pyx_v_ndim; __pyx_temp++) { + __Pyx_INCREF(__pyx_slice__5); + __Pyx_GIVEREF(__pyx_slice__5); + if (__Pyx_PyList_SET_ITEM(__pyx_t_1, __pyx_temp, __pyx_slice__5)) __PYX_ERR(1, 679, __pyx_L1_error); + } + } + __pyx_v_result = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "View.MemoryView":680 + * + * result = [slice(None)] * ndim + * have_slices = False # <<<<<<<<<<<<<< + * seen_ellipsis = False + * idx = 0 + */ + __pyx_v_have_slices = 0; + + /* "View.MemoryView":681 + * result = [slice(None)] * ndim + * have_slices = False + * seen_ellipsis = False # <<<<<<<<<<<<<< + * idx = 0 + * for item in tup: + */ + __pyx_v_seen_ellipsis = 0; + + /* "View.MemoryView":682 + * have_slices = False + * seen_ellipsis = False + * idx = 0 # <<<<<<<<<<<<<< + * for item in tup: + * if item is Ellipsis: + */ + __pyx_v_idx = 0; + + /* "View.MemoryView":683 + * seen_ellipsis = False + * idx = 0 + * for item in tup: # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + if (unlikely(__pyx_v_tup == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); + __PYX_ERR(1, 683, __pyx_L1_error) + } + __pyx_t_1 = __pyx_v_tup; __Pyx_INCREF(__pyx_t_1); __pyx_t_4 = 0; + for (;;) { + if (__pyx_t_4 >= PyTuple_GET_SIZE(__pyx_t_1)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_3 = PyTuple_GET_ITEM(__pyx_t_1, __pyx_t_4); __Pyx_INCREF(__pyx_t_3); __pyx_t_4++; if (unlikely((0 < 0))) __PYX_ERR(1, 683, __pyx_L1_error) + #else + __pyx_t_3 = PySequence_ITEM(__pyx_t_1, __pyx_t_4); __pyx_t_4++; if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 683, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + #endif + __Pyx_XDECREF_SET(__pyx_v_item, __pyx_t_3); + __pyx_t_3 = 0; + + /* "View.MemoryView":684 + * idx = 0 + * for item in tup: + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if not seen_ellipsis: + * idx += ndim - len(tup) + */ + __pyx_t_2 = (__pyx_v_item == __pyx_builtin_Ellipsis); + if (__pyx_t_2) { + + /* "View.MemoryView":685 + * for item in tup: + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * idx += ndim - len(tup) + * seen_ellipsis = True + */ + __pyx_t_2 = (!__pyx_v_seen_ellipsis); + if (__pyx_t_2) { + + /* "View.MemoryView":686 + * if item is Ellipsis: + * if not seen_ellipsis: + * idx += ndim - len(tup) # <<<<<<<<<<<<<< + * seen_ellipsis = True + * have_slices = True + */ + if (unlikely(__pyx_v_tup == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 686, __pyx_L1_error) + } + __pyx_t_5 = __Pyx_PyTuple_GET_SIZE(__pyx_v_tup); if (unlikely(__pyx_t_5 == ((Py_ssize_t)-1))) __PYX_ERR(1, 686, __pyx_L1_error) + __pyx_v_idx = (__pyx_v_idx + (__pyx_v_ndim - __pyx_t_5)); + + /* "View.MemoryView":687 + * if not seen_ellipsis: + * idx += ndim - len(tup) + * seen_ellipsis = True # <<<<<<<<<<<<<< + * have_slices = True + * else: + */ + __pyx_v_seen_ellipsis = 1; + + /* "View.MemoryView":685 + * for item in tup: + * if item is Ellipsis: + * if not seen_ellipsis: # <<<<<<<<<<<<<< + * idx += ndim - len(tup) + * seen_ellipsis = True + */ + } + + /* "View.MemoryView":688 + * idx += ndim - len(tup) + * seen_ellipsis = True + * have_slices = True # <<<<<<<<<<<<<< + * else: + * if isinstance(item, slice): + */ + __pyx_v_have_slices = 1; + + /* "View.MemoryView":684 + * idx = 0 + * for item in tup: + * if item is Ellipsis: # <<<<<<<<<<<<<< + * if 
not seen_ellipsis: + * idx += ndim - len(tup) + */ + goto __pyx_L5; + } + + /* "View.MemoryView":690 + * have_slices = True + * else: + * if isinstance(item, slice): # <<<<<<<<<<<<<< + * have_slices = True + * elif not PyIndex_Check(item): + */ + /*else*/ { + __pyx_t_2 = PySlice_Check(__pyx_v_item); + if (__pyx_t_2) { + + /* "View.MemoryView":691 + * else: + * if isinstance(item, slice): + * have_slices = True # <<<<<<<<<<<<<< + * elif not PyIndex_Check(item): + * raise TypeError, f"Cannot index with type '{type(item)}'" + */ + __pyx_v_have_slices = 1; + + /* "View.MemoryView":690 + * have_slices = True + * else: + * if isinstance(item, slice): # <<<<<<<<<<<<<< + * have_slices = True + * elif not PyIndex_Check(item): + */ + goto __pyx_L7; + } + + /* "View.MemoryView":692 + * if isinstance(item, slice): + * have_slices = True + * elif not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError, f"Cannot index with type '{type(item)}'" + * result[idx] = item + */ + __pyx_t_2 = (!(PyIndex_Check(__pyx_v_item) != 0)); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":693 + * have_slices = True + * elif not PyIndex_Check(item): + * raise TypeError, f"Cannot index with type '{type(item)}'" # <<<<<<<<<<<<<< + * result[idx] = item + * idx += 1 + */ + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 693, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = 0; + __pyx_t_6 = 127; + __Pyx_INCREF(__pyx_kp_u_Cannot_index_with_type); + __pyx_t_5 += 24; + __Pyx_GIVEREF(__pyx_kp_u_Cannot_index_with_type); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_kp_u_Cannot_index_with_type); + __pyx_t_7 = __Pyx_PyObject_FormatSimple(((PyObject *)Py_TYPE(__pyx_v_item)), __pyx_empty_unicode); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 693, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_6 = (__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_7) > __pyx_t_6) ? 
__Pyx_PyUnicode_MAX_CHAR_VALUE(__pyx_t_7) : __pyx_t_6; + __pyx_t_5 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_7); + __Pyx_GIVEREF(__pyx_t_7); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7); + __pyx_t_7 = 0; + __Pyx_INCREF(__pyx_kp_u__6); + __pyx_t_5 += 1; + __Pyx_GIVEREF(__pyx_kp_u__6); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_kp_u__6); + __pyx_t_7 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, __pyx_t_5, __pyx_t_6); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 693, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_builtin_TypeError, __pyx_t_7, 0, 0); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __PYX_ERR(1, 693, __pyx_L1_error) + + /* "View.MemoryView":692 + * if isinstance(item, slice): + * have_slices = True + * elif not PyIndex_Check(item): # <<<<<<<<<<<<<< + * raise TypeError, f"Cannot index with type '{type(item)}'" + * result[idx] = item + */ + } + __pyx_L7:; + + /* "View.MemoryView":694 + * elif not PyIndex_Check(item): + * raise TypeError, f"Cannot index with type '{type(item)}'" + * result[idx] = item # <<<<<<<<<<<<<< + * idx += 1 + * + */ + if (unlikely((__Pyx_SetItemInt(__pyx_v_result, __pyx_v_idx, __pyx_v_item, Py_ssize_t, 1, PyInt_FromSsize_t, 1, 1, 1) < 0))) __PYX_ERR(1, 694, __pyx_L1_error) + } + __pyx_L5:; + + /* "View.MemoryView":695 + * raise TypeError, f"Cannot index with type '{type(item)}'" + * result[idx] = item + * idx += 1 # <<<<<<<<<<<<<< + * + * nslices = ndim - idx + */ + __pyx_v_idx = (__pyx_v_idx + 1); + + /* "View.MemoryView":683 + * seen_ellipsis = False + * idx = 0 + * for item in tup: # <<<<<<<<<<<<<< + * if item is Ellipsis: + * if not seen_ellipsis: + */ + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "View.MemoryView":697 + * idx += 1 + * + * nslices = ndim - idx # <<<<<<<<<<<<<< + * return have_slices or nslices, tuple(result) + * + */ + __pyx_v_nslices = (__pyx_v_ndim - __pyx_v_idx); + + /* "View.MemoryView":698 + * + * nslices = ndim - idx + * return have_slices or nslices, tuple(result) # <<<<<<<<<<<<<< + * + * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: + */ + __Pyx_XDECREF(__pyx_r); + if (!__pyx_v_have_slices) { + } else { + __pyx_t_7 = __Pyx_PyBool_FromLong(__pyx_v_have_slices); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_1 = __pyx_t_7; + __pyx_t_7 = 0; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_7 = PyInt_FromSsize_t(__pyx_v_nslices); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_1 = __pyx_t_7; + __pyx_t_7 = 0; + __pyx_L9_bool_binop_done:; + __pyx_t_7 = PyList_AsTuple(__pyx_v_result); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_3 = PyTuple_New(2); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 698, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_t_1)) __PYX_ERR(1, 698, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_7); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_7)) __PYX_ERR(1, 698, __pyx_L1_error); + __pyx_t_1 = 0; + __pyx_t_7 = 0; + __pyx_r = ((PyObject*)__pyx_t_3); + __pyx_t_3 = 0; + goto __pyx_L0; + + /* "View.MemoryView":671 + * return isinstance(o, memoryview) + * + * cdef tuple _unellipsify(object index, int ndim): # <<<<<<<<<<<<<< + * """ + * Replace all ellipses with full slices and fill incomplete indices with + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_7); + 
__Pyx_AddTraceback("View.MemoryView._unellipsify", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_tup); + __Pyx_XDECREF(__pyx_v_result); + __Pyx_XDECREF(__pyx_v_item); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + +static int assert_direct_dimensions(Py_ssize_t *__pyx_v_suboffsets, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_suboffset; + int __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + int __pyx_t_4; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assert_direct_dimensions", 0); + + /* "View.MemoryView":701 + * + * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: + * for suboffset in suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * raise ValueError, "Indirect dimensions not supported" + */ + __pyx_t_2 = (__pyx_v_suboffsets + __pyx_v_ndim); + for (__pyx_t_3 = __pyx_v_suboffsets; __pyx_t_3 < __pyx_t_2; __pyx_t_3++) { + __pyx_t_1 = __pyx_t_3; + __pyx_v_suboffset = (__pyx_t_1[0]); + + /* "View.MemoryView":702 + * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError, "Indirect dimensions not supported" + * return 0 # return type just used as an error flag + */ + __pyx_t_4 = (__pyx_v_suboffset >= 0); + if (unlikely(__pyx_t_4)) { + + /* "View.MemoryView":703 + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + * raise ValueError, "Indirect dimensions not supported" # <<<<<<<<<<<<<< + * return 0 # return type just used as an error flag + * + */ + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_kp_s_Indirect_dimensions_not_supporte, 0, 0); + __PYX_ERR(1, 703, __pyx_L1_error) + + /* "View.MemoryView":702 + * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * raise ValueError, "Indirect dimensions not supported" + * return 0 # return type just used as an error flag + */ + } + } + + /* "View.MemoryView":704 + * if suboffset >= 0: + * raise ValueError, "Indirect dimensions not supported" + * return 0 # return type just used as an error flag # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":700 + * return have_slices or nslices, tuple(result) + * + * cdef int assert_direct_dimensions(Py_ssize_t *suboffsets, int ndim) except -1: # <<<<<<<<<<<<<< + * for suboffset in suboffsets[:ndim]: + * if suboffset >= 0: + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView.assert_direct_dimensions", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":711 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + +static struct __pyx_memoryview_obj *__pyx_memview_slice(struct __pyx_memoryview_obj *__pyx_v_memview, PyObject *__pyx_v_indices) { + int 
__pyx_v_new_ndim; + int __pyx_v_suboffset_dim; + int __pyx_v_dim; + __Pyx_memviewslice __pyx_v_src; + __Pyx_memviewslice __pyx_v_dst; + __Pyx_memviewslice *__pyx_v_p_src; + struct __pyx_memoryviewslice_obj *__pyx_v_memviewsliceobj = 0; + __Pyx_memviewslice *__pyx_v_p_dst; + int *__pyx_v_p_suboffset_dim; + Py_ssize_t __pyx_v_start; + Py_ssize_t __pyx_v_stop; + Py_ssize_t __pyx_v_step; + Py_ssize_t __pyx_v_cindex; + int __pyx_v_have_start; + int __pyx_v_have_stop; + int __pyx_v_have_step; + PyObject *__pyx_v_index = NULL; + struct __pyx_memoryview_obj *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + struct __pyx_memoryview_obj *__pyx_t_3; + char *__pyx_t_4; + int __pyx_t_5; + Py_ssize_t __pyx_t_6; + PyObject *(*__pyx_t_7)(PyObject *); + PyObject *__pyx_t_8 = NULL; + Py_ssize_t __pyx_t_9; + int __pyx_t_10; + Py_ssize_t __pyx_t_11; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memview_slice", 0); + + /* "View.MemoryView":712 + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): + * cdef int new_ndim = 0, suboffset_dim = -1, dim # <<<<<<<<<<<<<< + * cdef bint negative_step + * cdef __Pyx_memviewslice src, dst + */ + __pyx_v_new_ndim = 0; + __pyx_v_suboffset_dim = -1; + + /* "View.MemoryView":719 + * + * + * memset(&dst, 0, sizeof(dst)) # <<<<<<<<<<<<<< + * + * cdef _memoryviewslice memviewsliceobj + */ + (void)(memset((&__pyx_v_dst), 0, (sizeof(__pyx_v_dst)))); + + /* "View.MemoryView":723 + * cdef _memoryviewslice memviewsliceobj + * + * assert memview.view.ndim > 0 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + #ifndef CYTHON_WITHOUT_ASSERTIONS + if (unlikely(__pyx_assertions_enabled())) { + __pyx_t_1 = (__pyx_v_memview->view.ndim > 0); + if (unlikely(!__pyx_t_1)) { + __Pyx_Raise(__pyx_builtin_AssertionError, 0, 0, 0); + __PYX_ERR(1, 723, __pyx_L1_error) + } + } + #else + if ((1)); else __PYX_ERR(1, 723, __pyx_L1_error) + #endif + + /* "View.MemoryView":725 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + if (__pyx_t_1) { + + /* "View.MemoryView":726 + * + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview # <<<<<<<<<<<<<< + * p_src = &memviewsliceobj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 726, __pyx_L1_error) + __pyx_t_2 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_2); + __pyx_v_memviewsliceobj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":727 + * if isinstance(memview, _memoryviewslice): + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, &src) + */ + __pyx_v_p_src = (&__pyx_v_memviewsliceobj->from_slice); + + /* "View.MemoryView":725 + * assert memview.view.ndim > 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * memviewsliceobj = memview + * p_src = &memviewsliceobj.from_slice + */ + goto __pyx_L3; + } + + /* "View.MemoryView":729 + * p_src = &memviewsliceobj.from_slice + * else: + * slice_copy(memview, &src) # <<<<<<<<<<<<<< + * p_src = &src + * + */ + /*else*/ { + 
__pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_src)); + + /* "View.MemoryView":730 + * else: + * slice_copy(memview, &src) + * p_src = &src # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_p_src = (&__pyx_v_src); + } + __pyx_L3:; + + /* "View.MemoryView":736 + * + * + * dst.memview = p_src.memview # <<<<<<<<<<<<<< + * dst.data = p_src.data + * + */ + __pyx_t_3 = __pyx_v_p_src->memview; + __pyx_v_dst.memview = __pyx_t_3; + + /* "View.MemoryView":737 + * + * dst.memview = p_src.memview + * dst.data = p_src.data # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_4 = __pyx_v_p_src->data; + __pyx_v_dst.data = __pyx_t_4; + + /* "View.MemoryView":742 + * + * + * cdef __Pyx_memviewslice *p_dst = &dst # <<<<<<<<<<<<<< + * cdef int *p_suboffset_dim = &suboffset_dim + * cdef Py_ssize_t start, stop, step, cindex + */ + __pyx_v_p_dst = (&__pyx_v_dst); + + /* "View.MemoryView":743 + * + * cdef __Pyx_memviewslice *p_dst = &dst + * cdef int *p_suboffset_dim = &suboffset_dim # <<<<<<<<<<<<<< + * cdef Py_ssize_t start, stop, step, cindex + * cdef bint have_start, have_stop, have_step + */ + __pyx_v_p_suboffset_dim = (&__pyx_v_suboffset_dim); + + /* "View.MemoryView":747 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * cindex = index + */ + __pyx_t_5 = 0; + if (likely(PyList_CheckExact(__pyx_v_indices)) || PyTuple_CheckExact(__pyx_v_indices)) { + __pyx_t_2 = __pyx_v_indices; __Pyx_INCREF(__pyx_t_2); __pyx_t_6 = 0; + __pyx_t_7 = NULL; + } else { + __pyx_t_6 = -1; __pyx_t_2 = PyObject_GetIter(__pyx_v_indices); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 747, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_7 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_2); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 747, __pyx_L1_error) + } + for (;;) { + if (likely(!__pyx_t_7)) { + if (likely(PyList_CheckExact(__pyx_t_2))) { + if (__pyx_t_6 >= PyList_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_8 = PyList_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely((0 < 0))) __PYX_ERR(1, 747, __pyx_L1_error) + #else + __pyx_t_8 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 747, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + #endif + } else { + if (__pyx_t_6 >= PyTuple_GET_SIZE(__pyx_t_2)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_8 = PyTuple_GET_ITEM(__pyx_t_2, __pyx_t_6); __Pyx_INCREF(__pyx_t_8); __pyx_t_6++; if (unlikely((0 < 0))) __PYX_ERR(1, 747, __pyx_L1_error) + #else + __pyx_t_8 = PySequence_ITEM(__pyx_t_2, __pyx_t_6); __pyx_t_6++; if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 747, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + #endif + } + } else { + __pyx_t_8 = __pyx_t_7(__pyx_t_2); + if (unlikely(!__pyx_t_8)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(1, 747, __pyx_L1_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_8); + } + __Pyx_XDECREF_SET(__pyx_v_index, __pyx_t_8); + __pyx_t_8 = 0; + __pyx_v_dim = __pyx_t_5; + __pyx_t_5 = (__pyx_t_5 + 1); + + /* "View.MemoryView":748 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * cindex = index + * slice_memviewslice( + */ + __pyx_t_1 = (PyIndex_Check(__pyx_v_index) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":749 + * for dim, index in enumerate(indices): + * if 
PyIndex_Check(index): + * cindex = index # <<<<<<<<<<<<<< + * slice_memviewslice( + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + */ + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_v_index); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 749, __pyx_L1_error) + __pyx_v_cindex = __pyx_t_9; + + /* "View.MemoryView":750 + * if PyIndex_Check(index): + * cindex = index + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_10 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_cindex, 0, 0, 0, 0, 0, 0); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 750, __pyx_L1_error) + + /* "View.MemoryView":748 + * + * for dim, index in enumerate(indices): + * if PyIndex_Check(index): # <<<<<<<<<<<<<< + * cindex = index + * slice_memviewslice( + */ + goto __pyx_L6; + } + + /* "View.MemoryView":756 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + __pyx_t_1 = (__pyx_v_index == Py_None); + if (__pyx_t_1) { + + /* "View.MemoryView":757 + * False) + * elif index is None: + * p_dst.shape[new_ndim] = 1 # <<<<<<<<<<<<<< + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + */ + (__pyx_v_p_dst->shape[__pyx_v_new_ndim]) = 1; + + /* "View.MemoryView":758 + * elif index is None: + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 # <<<<<<<<<<<<<< + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 + */ + (__pyx_v_p_dst->strides[__pyx_v_new_ndim]) = 0; + + /* "View.MemoryView":759 + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 # <<<<<<<<<<<<<< + * new_ndim += 1 + * else: + */ + (__pyx_v_p_dst->suboffsets[__pyx_v_new_ndim]) = -1L; + + /* "View.MemoryView":760 + * p_dst.strides[new_ndim] = 0 + * p_dst.suboffsets[new_ndim] = -1 + * new_ndim += 1 # <<<<<<<<<<<<<< + * else: + * start = index.start or 0 + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + + /* "View.MemoryView":756 + * 0, 0, 0, # have_{start,stop,step} + * False) + * elif index is None: # <<<<<<<<<<<<<< + * p_dst.shape[new_ndim] = 1 + * p_dst.strides[new_ndim] = 0 + */ + goto __pyx_L6; + } + + /* "View.MemoryView":762 + * new_ndim += 1 + * else: + * start = index.start or 0 # <<<<<<<<<<<<<< + * stop = index.stop or 0 + * step = index.step or 0 + */ + /*else*/ { + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 762, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 762, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } else { + __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 762, __pyx_L1_error) + __pyx_t_9 = __pyx_t_11; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L7_bool_binop_done; + } + __pyx_t_9 = 0; + __pyx_L7_bool_binop_done:; + __pyx_v_start = __pyx_t_9; + + /* "View.MemoryView":763 + * else: + * start = index.start or 0 + * stop = index.stop or 0 # <<<<<<<<<<<<<< + * step = index.step or 0 + * + */ + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, 
__pyx_n_s_stop); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 763, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 763, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } else { + __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 763, __pyx_L1_error) + __pyx_t_9 = __pyx_t_11; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L9_bool_binop_done; + } + __pyx_t_9 = 0; + __pyx_L9_bool_binop_done:; + __pyx_v_stop = __pyx_t_9; + + /* "View.MemoryView":764 + * start = index.start or 0 + * stop = index.stop or 0 + * step = index.step or 0 # <<<<<<<<<<<<<< + * + * have_start = index.start is not None + */ + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 764, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_1 = __Pyx_PyObject_IsTrue(__pyx_t_8); if (unlikely((__pyx_t_1 < 0))) __PYX_ERR(1, 764, __pyx_L1_error) + if (!__pyx_t_1) { + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + } else { + __pyx_t_11 = __Pyx_PyIndex_AsSsize_t(__pyx_t_8); if (unlikely((__pyx_t_11 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 764, __pyx_L1_error) + __pyx_t_9 = __pyx_t_11; + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_9 = 0; + __pyx_L11_bool_binop_done:; + __pyx_v_step = __pyx_t_9; + + /* "View.MemoryView":766 + * step = index.step or 0 + * + * have_start = index.start is not None # <<<<<<<<<<<<<< + * have_stop = index.stop is not None + * have_step = index.step is not None + */ + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_start); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 766, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_1 = (__pyx_t_8 != Py_None); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_v_have_start = __pyx_t_1; + + /* "View.MemoryView":767 + * + * have_start = index.start is not None + * have_stop = index.stop is not None # <<<<<<<<<<<<<< + * have_step = index.step is not None + * + */ + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_stop); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 767, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_1 = (__pyx_t_8 != Py_None); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_v_have_stop = __pyx_t_1; + + /* "View.MemoryView":768 + * have_start = index.start is not None + * have_stop = index.stop is not None + * have_step = index.step is not None # <<<<<<<<<<<<<< + * + * slice_memviewslice( + */ + __pyx_t_8 = __Pyx_PyObject_GetAttrStr(__pyx_v_index, __pyx_n_s_step); if (unlikely(!__pyx_t_8)) __PYX_ERR(1, 768, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_8); + __pyx_t_1 = (__pyx_t_8 != Py_None); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __pyx_v_have_step = __pyx_t_1; + + /* "View.MemoryView":770 + * have_step = index.step is not None + * + * slice_memviewslice( # <<<<<<<<<<<<<< + * p_dst, p_src.shape[dim], p_src.strides[dim], p_src.suboffsets[dim], + * dim, new_ndim, p_suboffset_dim, + */ + __pyx_t_10 = __pyx_memoryview_slice_memviewslice(__pyx_v_p_dst, (__pyx_v_p_src->shape[__pyx_v_dim]), (__pyx_v_p_src->strides[__pyx_v_dim]), (__pyx_v_p_src->suboffsets[__pyx_v_dim]), __pyx_v_dim, __pyx_v_new_ndim, __pyx_v_p_suboffset_dim, __pyx_v_start, __pyx_v_stop, __pyx_v_step, __pyx_v_have_start, __pyx_v_have_stop, __pyx_v_have_step, 1); if (unlikely(__pyx_t_10 == ((int)-1))) __PYX_ERR(1, 770, __pyx_L1_error) + + /* "View.MemoryView":776 + * 
have_start, have_stop, have_step, + * True) + * new_ndim += 1 # <<<<<<<<<<<<<< + * + * if isinstance(memview, _memoryviewslice): + */ + __pyx_v_new_ndim = (__pyx_v_new_ndim + 1); + } + __pyx_L6:; + + /* "View.MemoryView":747 + * cdef bint have_start, have_stop, have_step + * + * for dim, index in enumerate(indices): # <<<<<<<<<<<<<< + * if PyIndex_Check(index): + * cindex = index + */ + } + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + + /* "View.MemoryView":778 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + if (__pyx_t_1) { + + /* "View.MemoryView":779 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __Pyx_XDECREF((PyObject *)__pyx_r); + + /* "View.MemoryView":780 + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, # <<<<<<<<<<<<<< + * memviewsliceobj.to_dtype_func, + * memview.dtype_is_object) + */ + if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 780, __pyx_L1_error) } + + /* "View.MemoryView":781 + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * else: + */ + if (unlikely(!__pyx_v_memviewsliceobj)) { __Pyx_RaiseUnboundLocalError("memviewsliceobj"); __PYX_ERR(1, 781, __pyx_L1_error) } + + /* "View.MemoryView":779 + * + * if isinstance(memview, _memoryviewslice): + * return memoryview_fromslice(dst, new_ndim, # <<<<<<<<<<<<<< + * memviewsliceobj.to_object_func, + * memviewsliceobj.to_dtype_func, + */ + __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, __pyx_v_memviewsliceobj->to_object_func, __pyx_v_memviewsliceobj->to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 779, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_memoryview_type))))) __PYX_ERR(1, 779, __pyx_L1_error) + __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_2); + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":778 + * new_ndim += 1 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * return memoryview_fromslice(dst, new_ndim, + * memviewsliceobj.to_object_func, + */ + } + + /* "View.MemoryView":784 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + /*else*/ { + __Pyx_XDECREF((PyObject *)__pyx_r); + + /* "View.MemoryView":785 + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_dst, __pyx_v_new_ndim, NULL, NULL, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 784, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + + /* "View.MemoryView":784 + * memview.dtype_is_object) + * else: + * return memoryview_fromslice(dst, new_ndim, NULL, NULL, # <<<<<<<<<<<<<< + * memview.dtype_is_object) + * + */ + if (!(likely(((__pyx_t_2) == Py_None) || likely(__Pyx_TypeTest(__pyx_t_2, __pyx_memoryview_type))))) __PYX_ERR(1, 784, __pyx_L1_error) 
+ __pyx_r = ((struct __pyx_memoryview_obj *)__pyx_t_2); + __pyx_t_2 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":711 + * + * @cname('__pyx_memview_slice') + * cdef memoryview memview_slice(memoryview memview, object indices): # <<<<<<<<<<<<<< + * cdef int new_ndim = 0, suboffset_dim = -1, dim + * cdef bint negative_step + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("View.MemoryView.memview_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_memviewsliceobj); + __Pyx_XDECREF(__pyx_v_index); + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":793 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + +static int __pyx_memoryview_slice_memviewslice(__Pyx_memviewslice *__pyx_v_dst, Py_ssize_t __pyx_v_shape, Py_ssize_t __pyx_v_stride, Py_ssize_t __pyx_v_suboffset, int __pyx_v_dim, int __pyx_v_new_ndim, int *__pyx_v_suboffset_dim, Py_ssize_t __pyx_v_start, Py_ssize_t __pyx_v_stop, Py_ssize_t __pyx_v_step, int __pyx_v_have_start, int __pyx_v_have_stop, int __pyx_v_have_step, int __pyx_v_is_slice) { + Py_ssize_t __pyx_v_new_shape; + int __pyx_v_negative_step; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save; + #endif + __Pyx_RefNannySetupContext("slice_memviewslice", 1); + + /* "View.MemoryView":813 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + */ + __pyx_t_1 = (!__pyx_v_is_slice); + if (__pyx_t_1) { + + /* "View.MemoryView":815 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + __pyx_t_1 = (__pyx_v_start < 0); + if (__pyx_t_1) { + + /* "View.MemoryView":816 + * + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if not 0 <= start < shape: + * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":815 + * if not is_slice: + * + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if not 0 <= start < shape: + */ + } + + /* "View.MemoryView":817 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + __pyx_t_1 = (0 <= __pyx_v_start); + if (__pyx_t_1) { + __pyx_t_1 = (__pyx_v_start < __pyx_v_shape); + } + __pyx_t_2 = (!__pyx_t_1); + if (__pyx_t_2) { + + /* "View.MemoryView":818 + * start += shape + * if not 0 <= start < shape: + * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) # <<<<<<<<<<<<<< + * else: + * + */ + __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_IndexError, __pyx_kp_s_Index_out_of_bounds_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 818, __pyx_L1_error) + + /* "View.MemoryView":817 + * if start < 0: + * start += shape + * if not 0 <= start < shape: # <<<<<<<<<<<<<< + * _err_dim(PyExc_IndexError, "Index out of bounds (axis %d)", dim) + * else: + */ + } + + /* "View.MemoryView":813 + * cdef bint negative_step + * + * if not is_slice: # <<<<<<<<<<<<<< + * + * if start < 0: + 
*/ + goto __pyx_L3; + } + + /* "View.MemoryView":821 + * else: + * + * if have_step: # <<<<<<<<<<<<<< + * negative_step = step < 0 + * if step == 0: + */ + /*else*/ { + __pyx_t_2 = (__pyx_v_have_step != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":822 + * + * if have_step: + * negative_step = step < 0 # <<<<<<<<<<<<<< + * if step == 0: + * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) + */ + __pyx_v_negative_step = (__pyx_v_step < 0); + + /* "View.MemoryView":823 + * if have_step: + * negative_step = step < 0 + * if step == 0: # <<<<<<<<<<<<<< + * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) + * else: + */ + __pyx_t_2 = (__pyx_v_step == 0); + if (__pyx_t_2) { + + /* "View.MemoryView":824 + * negative_step = step < 0 + * if step == 0: + * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) # <<<<<<<<<<<<<< + * else: + * negative_step = False + */ + __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_ValueError, __pyx_kp_s_Step_may_not_be_zero_axis_d, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 824, __pyx_L1_error) + + /* "View.MemoryView":823 + * if have_step: + * negative_step = step < 0 + * if step == 0: # <<<<<<<<<<<<<< + * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) + * else: + */ + } + + /* "View.MemoryView":821 + * else: + * + * if have_step: # <<<<<<<<<<<<<< + * negative_step = step < 0 + * if step == 0: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":826 + * _err_dim(PyExc_ValueError, "Step may not be zero (axis %d)", dim) + * else: + * negative_step = False # <<<<<<<<<<<<<< + * step = 1 + * + */ + /*else*/ { + __pyx_v_negative_step = 0; + + /* "View.MemoryView":827 + * else: + * negative_step = False + * step = 1 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_step = 1; + } + __pyx_L6:; + + /* "View.MemoryView":830 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + __pyx_t_2 = (__pyx_v_have_start != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":831 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + __pyx_t_2 = (__pyx_v_start < 0); + if (__pyx_t_2) { + + /* "View.MemoryView":832 + * if have_start: + * if start < 0: + * start += shape # <<<<<<<<<<<<<< + * if start < 0: + * start = 0 + */ + __pyx_v_start = (__pyx_v_start + __pyx_v_shape); + + /* "View.MemoryView":833 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + __pyx_t_2 = (__pyx_v_start < 0); + if (__pyx_t_2) { + + /* "View.MemoryView":834 + * start += shape + * if start < 0: + * start = 0 # <<<<<<<<<<<<<< + * elif start >= shape: + * if negative_step: + */ + __pyx_v_start = 0; + + /* "View.MemoryView":833 + * if start < 0: + * start += shape + * if start < 0: # <<<<<<<<<<<<<< + * start = 0 + * elif start >= shape: + */ + } + + /* "View.MemoryView":831 + * + * if have_start: + * if start < 0: # <<<<<<<<<<<<<< + * start += shape + * if start < 0: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":835 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + __pyx_t_2 = (__pyx_v_start >= __pyx_v_shape); + if (__pyx_t_2) { + + /* "View.MemoryView":836 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + if (__pyx_v_negative_step) { + + /* "View.MemoryView":837 + * elif start >= shape: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * 
start = shape + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":836 + * start = 0 + * elif start >= shape: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L11; + } + + /* "View.MemoryView":839 + * start = shape - 1 + * else: + * start = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + /*else*/ { + __pyx_v_start = __pyx_v_shape; + } + __pyx_L11:; + + /* "View.MemoryView":835 + * if start < 0: + * start = 0 + * elif start >= shape: # <<<<<<<<<<<<<< + * if negative_step: + * start = shape - 1 + */ + } + __pyx_L9:; + + /* "View.MemoryView":830 + * + * + * if have_start: # <<<<<<<<<<<<<< + * if start < 0: + * start += shape + */ + goto __pyx_L8; + } + + /* "View.MemoryView":841 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + /*else*/ { + if (__pyx_v_negative_step) { + + /* "View.MemoryView":842 + * else: + * if negative_step: + * start = shape - 1 # <<<<<<<<<<<<<< + * else: + * start = 0 + */ + __pyx_v_start = (__pyx_v_shape - 1); + + /* "View.MemoryView":841 + * start = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * start = shape - 1 + * else: + */ + goto __pyx_L12; + } + + /* "View.MemoryView":844 + * start = shape - 1 + * else: + * start = 0 # <<<<<<<<<<<<<< + * + * if have_stop: + */ + /*else*/ { + __pyx_v_start = 0; + } + __pyx_L12:; + } + __pyx_L8:; + + /* "View.MemoryView":846 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + __pyx_t_2 = (__pyx_v_have_stop != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":847 + * + * if have_stop: + * if stop < 0: # <<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + __pyx_t_2 = (__pyx_v_stop < 0); + if (__pyx_t_2) { + + /* "View.MemoryView":848 + * if have_stop: + * if stop < 0: + * stop += shape # <<<<<<<<<<<<<< + * if stop < 0: + * stop = 0 + */ + __pyx_v_stop = (__pyx_v_stop + __pyx_v_shape); + + /* "View.MemoryView":849 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + __pyx_t_2 = (__pyx_v_stop < 0); + if (__pyx_t_2) { + + /* "View.MemoryView":850 + * stop += shape + * if stop < 0: + * stop = 0 # <<<<<<<<<<<<<< + * elif stop > shape: + * stop = shape + */ + __pyx_v_stop = 0; + + /* "View.MemoryView":849 + * if stop < 0: + * stop += shape + * if stop < 0: # <<<<<<<<<<<<<< + * stop = 0 + * elif stop > shape: + */ + } + + /* "View.MemoryView":847 + * + * if have_stop: + * if stop < 0: # <<<<<<<<<<<<<< + * stop += shape + * if stop < 0: + */ + goto __pyx_L14; + } + + /* "View.MemoryView":851 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + __pyx_t_2 = (__pyx_v_stop > __pyx_v_shape); + if (__pyx_t_2) { + + /* "View.MemoryView":852 + * stop = 0 + * elif stop > shape: + * stop = shape # <<<<<<<<<<<<<< + * else: + * if negative_step: + */ + __pyx_v_stop = __pyx_v_shape; + + /* "View.MemoryView":851 + * if stop < 0: + * stop = 0 + * elif stop > shape: # <<<<<<<<<<<<<< + * stop = shape + * else: + */ + } + __pyx_L14:; + + /* "View.MemoryView":846 + * start = 0 + * + * if have_stop: # <<<<<<<<<<<<<< + * if stop < 0: + * stop += shape + */ + goto __pyx_L13; + } + + /* "View.MemoryView":854 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + /*else*/ { + if (__pyx_v_negative_step) { + + /* "View.MemoryView":855 + * else: + * if negative_step: + * stop = -1 # <<<<<<<<<<<<<< + * else: + * stop = shape + */ + 
__pyx_v_stop = -1L; + + /* "View.MemoryView":854 + * stop = shape + * else: + * if negative_step: # <<<<<<<<<<<<<< + * stop = -1 + * else: + */ + goto __pyx_L16; + } + + /* "View.MemoryView":857 + * stop = -1 + * else: + * stop = shape # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __pyx_v_stop = __pyx_v_shape; + } + __pyx_L16:; + } + __pyx_L13:; + + /* "View.MemoryView":861 + * + * with cython.cdivision(True): + * new_shape = (stop - start) // step # <<<<<<<<<<<<<< + * + * if (stop - start) - step * new_shape: + */ + __pyx_v_new_shape = ((__pyx_v_stop - __pyx_v_start) / __pyx_v_step); + + /* "View.MemoryView":863 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * new_shape += 1 + * + */ + __pyx_t_2 = (((__pyx_v_stop - __pyx_v_start) - (__pyx_v_step * __pyx_v_new_shape)) != 0); + if (__pyx_t_2) { + + /* "View.MemoryView":864 + * + * if (stop - start) - step * new_shape: + * new_shape += 1 # <<<<<<<<<<<<<< + * + * if new_shape < 0: + */ + __pyx_v_new_shape = (__pyx_v_new_shape + 1); + + /* "View.MemoryView":863 + * new_shape = (stop - start) // step + * + * if (stop - start) - step * new_shape: # <<<<<<<<<<<<<< + * new_shape += 1 + * + */ + } + + /* "View.MemoryView":866 + * new_shape += 1 + * + * if new_shape < 0: # <<<<<<<<<<<<<< + * new_shape = 0 + * + */ + __pyx_t_2 = (__pyx_v_new_shape < 0); + if (__pyx_t_2) { + + /* "View.MemoryView":867 + * + * if new_shape < 0: + * new_shape = 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_new_shape = 0; + + /* "View.MemoryView":866 + * new_shape += 1 + * + * if new_shape < 0: # <<<<<<<<<<<<<< + * new_shape = 0 + * + */ + } + + /* "View.MemoryView":870 + * + * + * dst.strides[new_ndim] = stride * step # <<<<<<<<<<<<<< + * dst.shape[new_ndim] = new_shape + * dst.suboffsets[new_ndim] = suboffset + */ + (__pyx_v_dst->strides[__pyx_v_new_ndim]) = (__pyx_v_stride * __pyx_v_step); + + /* "View.MemoryView":871 + * + * dst.strides[new_ndim] = stride * step + * dst.shape[new_ndim] = new_shape # <<<<<<<<<<<<<< + * dst.suboffsets[new_ndim] = suboffset + * + */ + (__pyx_v_dst->shape[__pyx_v_new_ndim]) = __pyx_v_new_shape; + + /* "View.MemoryView":872 + * dst.strides[new_ndim] = stride * step + * dst.shape[new_ndim] = new_shape + * dst.suboffsets[new_ndim] = suboffset # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_dst->suboffsets[__pyx_v_new_ndim]) = __pyx_v_suboffset; + } + __pyx_L3:; + + /* "View.MemoryView":875 + * + * + * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< + * dst.data += start * stride + * else: + */ + __pyx_t_2 = ((__pyx_v_suboffset_dim[0]) < 0); + if (__pyx_t_2) { + + /* "View.MemoryView":876 + * + * if suboffset_dim[0] < 0: + * dst.data += start * stride # <<<<<<<<<<<<<< + * else: + * dst.suboffsets[suboffset_dim[0]] += start * stride + */ + __pyx_v_dst->data = (__pyx_v_dst->data + (__pyx_v_start * __pyx_v_stride)); + + /* "View.MemoryView":875 + * + * + * if suboffset_dim[0] < 0: # <<<<<<<<<<<<<< + * dst.data += start * stride + * else: + */ + goto __pyx_L19; + } + + /* "View.MemoryView":878 + * dst.data += start * stride + * else: + * dst.suboffsets[suboffset_dim[0]] += start * stride # <<<<<<<<<<<<<< + * + * if suboffset >= 0: + */ + /*else*/ { + __pyx_t_3 = (__pyx_v_suboffset_dim[0]); + (__pyx_v_dst->suboffsets[__pyx_t_3]) = ((__pyx_v_dst->suboffsets[__pyx_t_3]) + (__pyx_v_start * __pyx_v_stride)); + } + __pyx_L19:; + + /* "View.MemoryView":880 + * dst.suboffsets[suboffset_dim[0]] += start * stride + * + * if suboffset >= 0: # <<<<<<<<<<<<<< + * if not is_slice: + * if new_ndim == 0: + */ + __pyx_t_2 = 
(__pyx_v_suboffset >= 0); + if (__pyx_t_2) { + + /* "View.MemoryView":881 + * + * if suboffset >= 0: + * if not is_slice: # <<<<<<<<<<<<<< + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset + */ + __pyx_t_2 = (!__pyx_v_is_slice); + if (__pyx_t_2) { + + /* "View.MemoryView":882 + * if suboffset >= 0: + * if not is_slice: + * if new_ndim == 0: # <<<<<<<<<<<<<< + * dst.data = ( dst.data)[0] + suboffset + * else: + */ + __pyx_t_2 = (__pyx_v_new_ndim == 0); + if (__pyx_t_2) { + + /* "View.MemoryView":883 + * if not is_slice: + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset # <<<<<<<<<<<<<< + * else: + * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " + */ + __pyx_v_dst->data = ((((char **)__pyx_v_dst->data)[0]) + __pyx_v_suboffset); + + /* "View.MemoryView":882 + * if suboffset >= 0: + * if not is_slice: + * if new_ndim == 0: # <<<<<<<<<<<<<< + * dst.data = ( dst.data)[0] + suboffset + * else: + */ + goto __pyx_L22; + } + + /* "View.MemoryView":885 + * dst.data = ( dst.data)[0] + suboffset + * else: + * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " # <<<<<<<<<<<<<< + * "must be indexed and not sliced", dim) + * else: + */ + /*else*/ { + + /* "View.MemoryView":886 + * else: + * _err_dim(PyExc_IndexError, "All dimensions preceding dimension %d " + * "must be indexed and not sliced", dim) # <<<<<<<<<<<<<< + * else: + * suboffset_dim[0] = new_ndim + */ + __pyx_t_3 = __pyx_memoryview_err_dim(PyExc_IndexError, __pyx_kp_s_All_dimensions_preceding_dimensi, __pyx_v_dim); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 885, __pyx_L1_error) + } + __pyx_L22:; + + /* "View.MemoryView":881 + * + * if suboffset >= 0: + * if not is_slice: # <<<<<<<<<<<<<< + * if new_ndim == 0: + * dst.data = ( dst.data)[0] + suboffset + */ + goto __pyx_L21; + } + + /* "View.MemoryView":888 + * "must be indexed and not sliced", dim) + * else: + * suboffset_dim[0] = new_ndim # <<<<<<<<<<<<<< + * + * return 0 + */ + /*else*/ { + (__pyx_v_suboffset_dim[0]) = __pyx_v_new_ndim; + } + __pyx_L21:; + + /* "View.MemoryView":880 + * dst.suboffsets[suboffset_dim[0]] += start * stride + * + * if suboffset >= 0: # <<<<<<<<<<<<<< + * if not is_slice: + * if new_ndim == 0: + */ + } + + /* "View.MemoryView":890 + * suboffset_dim[0] = new_ndim + * + * return 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":793 + * + * @cname('__pyx_memoryview_slice_memviewslice') + * cdef int slice_memviewslice( # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * Py_ssize_t shape, Py_ssize_t stride, Py_ssize_t suboffset, + */ + + /* function exit code */ + __pyx_L1_error:; + #ifdef WITH_THREAD + __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.slice_memviewslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __pyx_L0:; + __Pyx_RefNannyFinishContextNogil() + return __pyx_r; +} + +/* "View.MemoryView":896 + * + * @cname('__pyx_pybuffer_index') + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + */ + +static char *__pyx_pybuffer_index(Py_buffer *__pyx_v_view, char *__pyx_v_bufp, Py_ssize_t __pyx_v_index, Py_ssize_t __pyx_v_dim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_suboffset; + Py_ssize_t __pyx_v_itemsize; + char *__pyx_v_resultp; + char *__pyx_r; 
+ __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + Py_UCS4 __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("pybuffer_index", 0); + + /* "View.MemoryView":898 + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 # <<<<<<<<<<<<<< + * cdef Py_ssize_t itemsize = view.itemsize + * cdef char *resultp + */ + __pyx_v_suboffset = -1L; + + /* "View.MemoryView":899 + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + * cdef Py_ssize_t itemsize = view.itemsize # <<<<<<<<<<<<<< + * cdef char *resultp + * + */ + __pyx_t_1 = __pyx_v_view->itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":902 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len // itemsize + * stride = itemsize + */ + __pyx_t_2 = (__pyx_v_view->ndim == 0); + if (__pyx_t_2) { + + /* "View.MemoryView":903 + * + * if view.ndim == 0: + * shape = view.len // itemsize # <<<<<<<<<<<<<< + * stride = itemsize + * else: + */ + if (unlikely(__pyx_v_itemsize == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "integer division or modulo by zero"); + __PYX_ERR(1, 903, __pyx_L1_error) + } + else if (sizeof(Py_ssize_t) == sizeof(long) && (!(((Py_ssize_t)-1) > 0)) && unlikely(__pyx_v_itemsize == (Py_ssize_t)-1) && unlikely(__Pyx_UNARY_NEG_WOULD_OVERFLOW(__pyx_v_view->len))) { + PyErr_SetString(PyExc_OverflowError, "value too large to perform division"); + __PYX_ERR(1, 903, __pyx_L1_error) + } + __pyx_v_shape = __Pyx_div_Py_ssize_t(__pyx_v_view->len, __pyx_v_itemsize); + + /* "View.MemoryView":904 + * if view.ndim == 0: + * shape = view.len // itemsize + * stride = itemsize # <<<<<<<<<<<<<< + * else: + * shape = view.shape[dim] + */ + __pyx_v_stride = __pyx_v_itemsize; + + /* "View.MemoryView":902 + * cdef char *resultp + * + * if view.ndim == 0: # <<<<<<<<<<<<<< + * shape = view.len // itemsize + * stride = itemsize + */ + goto __pyx_L3; + } + + /* "View.MemoryView":906 + * stride = itemsize + * else: + * shape = view.shape[dim] # <<<<<<<<<<<<<< + * stride = view.strides[dim] + * if view.suboffsets != NULL: + */ + /*else*/ { + __pyx_v_shape = (__pyx_v_view->shape[__pyx_v_dim]); + + /* "View.MemoryView":907 + * else: + * shape = view.shape[dim] + * stride = view.strides[dim] # <<<<<<<<<<<<<< + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] + */ + __pyx_v_stride = (__pyx_v_view->strides[__pyx_v_dim]); + + /* "View.MemoryView":908 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + __pyx_t_2 = (__pyx_v_view->suboffsets != NULL); + if (__pyx_t_2) { + + /* "View.MemoryView":909 + * stride = view.strides[dim] + * if view.suboffsets != NULL: + * suboffset = view.suboffsets[dim] # <<<<<<<<<<<<<< + * + * if index < 0: + */ + __pyx_v_suboffset = (__pyx_v_view->suboffsets[__pyx_v_dim]); + + /* "View.MemoryView":908 + * shape = view.shape[dim] + * stride = view.strides[dim] + * if view.suboffsets != NULL: # <<<<<<<<<<<<<< + * suboffset = view.suboffsets[dim] + * + */ + } + } + __pyx_L3:; + + /* "View.MemoryView":911 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + __pyx_t_2 = (__pyx_v_index < 0); + if (__pyx_t_2) { + + /* 
"View.MemoryView":912 + * + * if index < 0: + * index += view.shape[dim] # <<<<<<<<<<<<<< + * if index < 0: + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" + */ + __pyx_v_index = (__pyx_v_index + (__pyx_v_view->shape[__pyx_v_dim])); + + /* "View.MemoryView":913 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" + * + */ + __pyx_t_2 = (__pyx_v_index < 0); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":914 + * index += view.shape[dim] + * if index < 0: + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" # <<<<<<<<<<<<<< + * + * if index >= shape: + */ + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 914, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = 0; + __pyx_t_4 = 127; + __Pyx_INCREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a); + __pyx_t_1 += 37; + __Pyx_GIVEREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a); + PyTuple_SET_ITEM(__pyx_t_3, 0, __pyx_kp_u_Out_of_bounds_on_buffer_access_a); + __pyx_t_5 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 914, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_1 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_5); + __Pyx_GIVEREF(__pyx_t_5); + PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_t_5); + __pyx_t_5 = 0; + __Pyx_INCREF(__pyx_kp_u__7); + __pyx_t_1 += 1; + __Pyx_GIVEREF(__pyx_kp_u__7); + PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_kp_u__7); + __pyx_t_5 = __Pyx_PyUnicode_Join(__pyx_t_3, 3, __pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 914, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_builtin_IndexError, __pyx_t_5, 0, 0); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __PYX_ERR(1, 914, __pyx_L1_error) + + /* "View.MemoryView":913 + * if index < 0: + * index += view.shape[dim] + * if index < 0: # <<<<<<<<<<<<<< + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" + * + */ + } + + /* "View.MemoryView":911 + * suboffset = view.suboffsets[dim] + * + * if index < 0: # <<<<<<<<<<<<<< + * index += view.shape[dim] + * if index < 0: + */ + } + + /* "View.MemoryView":916 + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" + * + * if index >= shape: # <<<<<<<<<<<<<< + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" + * + */ + __pyx_t_2 = (__pyx_v_index >= __pyx_v_shape); + if (unlikely(__pyx_t_2)) { + + /* "View.MemoryView":917 + * + * if index >= shape: + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" # <<<<<<<<<<<<<< + * + * resultp = bufp + index * stride + */ + __pyx_t_5 = PyTuple_New(3); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 917, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_1 = 0; + __pyx_t_4 = 127; + __Pyx_INCREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a); + __pyx_t_1 += 37; + __Pyx_GIVEREF(__pyx_kp_u_Out_of_bounds_on_buffer_access_a); + PyTuple_SET_ITEM(__pyx_t_5, 0, __pyx_kp_u_Out_of_bounds_on_buffer_access_a); + __pyx_t_3 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_dim, 0, ' ', 'd'); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_3); + __Pyx_GIVEREF(__pyx_t_3); + PyTuple_SET_ITEM(__pyx_t_5, 1, __pyx_t_3); + __pyx_t_3 = 0; + __Pyx_INCREF(__pyx_kp_u__7); + __pyx_t_1 += 1; + __Pyx_GIVEREF(__pyx_kp_u__7); + PyTuple_SET_ITEM(__pyx_t_5, 2, __pyx_kp_u__7); + __pyx_t_3 = __Pyx_PyUnicode_Join(__pyx_t_5, 3, __pyx_t_1, 
__pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 917, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_Raise(__pyx_builtin_IndexError, __pyx_t_3, 0, 0); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __PYX_ERR(1, 917, __pyx_L1_error) + + /* "View.MemoryView":916 + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" + * + * if index >= shape: # <<<<<<<<<<<<<< + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" + * + */ + } + + /* "View.MemoryView":919 + * raise IndexError, f"Out of bounds on buffer access (axis {dim})" + * + * resultp = bufp + index * stride # <<<<<<<<<<<<<< + * if suboffset >= 0: + * resultp = ( resultp)[0] + suboffset + */ + __pyx_v_resultp = (__pyx_v_bufp + (__pyx_v_index * __pyx_v_stride)); + + /* "View.MemoryView":920 + * + * resultp = bufp + index * stride + * if suboffset >= 0: # <<<<<<<<<<<<<< + * resultp = ( resultp)[0] + suboffset + * + */ + __pyx_t_2 = (__pyx_v_suboffset >= 0); + if (__pyx_t_2) { + + /* "View.MemoryView":921 + * resultp = bufp + index * stride + * if suboffset >= 0: + * resultp = ( resultp)[0] + suboffset # <<<<<<<<<<<<<< + * + * return resultp + */ + __pyx_v_resultp = ((((char **)__pyx_v_resultp)[0]) + __pyx_v_suboffset); + + /* "View.MemoryView":920 + * + * resultp = bufp + index * stride + * if suboffset >= 0: # <<<<<<<<<<<<<< + * resultp = ( resultp)[0] + suboffset + * + */ + } + + /* "View.MemoryView":923 + * resultp = ( resultp)[0] + suboffset + * + * return resultp # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_resultp; + goto __pyx_L0; + + /* "View.MemoryView":896 + * + * @cname('__pyx_pybuffer_index') + * cdef char *pybuffer_index(Py_buffer *view, char *bufp, Py_ssize_t index, # <<<<<<<<<<<<<< + * Py_ssize_t dim) except NULL: + * cdef Py_ssize_t shape, stride, suboffset = -1 + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("View.MemoryView.pybuffer_index", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":929 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + +static int __pyx_memslice_transpose(__Pyx_memviewslice *__pyx_v_memslice) { + int __pyx_v_ndim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + int __pyx_v_i; + int __pyx_v_j; + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + Py_ssize_t *__pyx_t_2; + long __pyx_t_3; + long __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + int __pyx_t_7; + int __pyx_t_8; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save; + #endif + __Pyx_RefNannySetupContext("transpose_memslice", 1); + + /* "View.MemoryView":930 + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil: + * cdef int ndim = memslice.memview.view.ndim # <<<<<<<<<<<<<< + * + * cdef Py_ssize_t *shape = memslice.shape + */ + __pyx_t_1 = __pyx_v_memslice->memview->view.ndim; + __pyx_v_ndim = __pyx_t_1; + + /* "View.MemoryView":932 + * cdef int ndim = memslice.memview.view.ndim + * + * cdef Py_ssize_t *shape = memslice.shape # <<<<<<<<<<<<<< + * cdef Py_ssize_t *strides = memslice.strides + * + */ + __pyx_t_2 = 
__pyx_v_memslice->shape; + __pyx_v_shape = __pyx_t_2; + + /* "View.MemoryView":933 + * + * cdef Py_ssize_t *shape = memslice.shape + * cdef Py_ssize_t *strides = memslice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = __pyx_v_memslice->strides; + __pyx_v_strides = __pyx_t_2; + + /* "View.MemoryView":937 + * + * cdef int i, j + * for i in range(ndim // 2): # <<<<<<<<<<<<<< + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + */ + __pyx_t_3 = __Pyx_div_long(__pyx_v_ndim, 2); + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_1 = 0; __pyx_t_1 < __pyx_t_4; __pyx_t_1+=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":938 + * cdef int i, j + * for i in range(ndim // 2): + * j = ndim - 1 - i # <<<<<<<<<<<<<< + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] + */ + __pyx_v_j = ((__pyx_v_ndim - 1) - __pyx_v_i); + + /* "View.MemoryView":939 + * for i in range(ndim // 2): + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] # <<<<<<<<<<<<<< + * shape[i], shape[j] = shape[j], shape[i] + * + */ + __pyx_t_5 = (__pyx_v_strides[__pyx_v_j]); + __pyx_t_6 = (__pyx_v_strides[__pyx_v_i]); + (__pyx_v_strides[__pyx_v_i]) = __pyx_t_5; + (__pyx_v_strides[__pyx_v_j]) = __pyx_t_6; + + /* "View.MemoryView":940 + * j = ndim - 1 - i + * strides[i], strides[j] = strides[j], strides[i] + * shape[i], shape[j] = shape[j], shape[i] # <<<<<<<<<<<<<< + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + */ + __pyx_t_6 = (__pyx_v_shape[__pyx_v_j]); + __pyx_t_5 = (__pyx_v_shape[__pyx_v_i]); + (__pyx_v_shape[__pyx_v_i]) = __pyx_t_6; + (__pyx_v_shape[__pyx_v_j]) = __pyx_t_5; + + /* "View.MemoryView":942 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + __pyx_t_8 = ((__pyx_v_memslice->suboffsets[__pyx_v_i]) >= 0); + if (!__pyx_t_8) { + } else { + __pyx_t_7 = __pyx_t_8; + goto __pyx_L6_bool_binop_done; + } + __pyx_t_8 = ((__pyx_v_memslice->suboffsets[__pyx_v_j]) >= 0); + __pyx_t_7 = __pyx_t_8; + __pyx_L6_bool_binop_done:; + if (__pyx_t_7) { + + /* "View.MemoryView":943 + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: + * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") # <<<<<<<<<<<<<< + * + * return 0 + */ + __pyx_t_9 = __pyx_memoryview_err(PyExc_ValueError, __pyx_kp_s_Cannot_transpose_memoryview_with); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(1, 943, __pyx_L1_error) + + /* "View.MemoryView":942 + * shape[i], shape[j] = shape[j], shape[i] + * + * if memslice.suboffsets[i] >= 0 or memslice.suboffsets[j] >= 0: # <<<<<<<<<<<<<< + * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") + * + */ + } + } + + /* "View.MemoryView":945 + * _err(PyExc_ValueError, "Cannot transpose memoryview with indirect dimensions") + * + * return 0 # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":929 + * + * @cname('__pyx_memslice_transpose') + * cdef int transpose_memslice(__Pyx_memviewslice *memslice) except -1 nogil: # <<<<<<<<<<<<<< + * cdef int ndim = memslice.memview.view.ndim + * + */ + + /* function exit code */ + __pyx_L1_error:; + #ifdef WITH_THREAD + __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.transpose_memslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + #ifdef WITH_THREAD 
+ __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __pyx_L0:; + __Pyx_RefNannyFinishContextNogil() + return __pyx_r; +} + +/* "View.MemoryView":963 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) + * + */ + +/* Python wrapper */ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self); /*proto*/ +static void __pyx_memoryviewslice___dealloc__(PyObject *__pyx_v_self) { + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__ (wrapper)", 0); + __pyx_kwvalues = __Pyx_KwValues_VARARGS(__pyx_args, __pyx_nargs); + __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +static void __pyx_memoryviewslice___pyx_pf_15View_dot_MemoryView_16_memoryviewslice___dealloc__(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__dealloc__", 0); + + /* "View.MemoryView":964 + * + * def __dealloc__(self): + * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) # <<<<<<<<<<<<<< + * + * cdef convert_item_to_object(self, char *itemp): + */ + __PYX_XCLEAR_MEMVIEW((&__pyx_v_self->from_slice), 1); + + /* "View.MemoryView":963 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * def __dealloc__(self): # <<<<<<<<<<<<<< + * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) + * + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":966 + * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + +static PyObject *__pyx_memoryviewslice_convert_item_to_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("convert_item_to_object", 0); + + /* "View.MemoryView":967 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + __pyx_t_1 = (__pyx_v_self->to_object_func != NULL); + if (__pyx_t_1) { + + /* "View.MemoryView":968 + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) # <<<<<<<<<<<<<< + * else: + * return memoryview.convert_item_to_object(self, itemp) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_v_self->to_object_func(__pyx_v_itemp); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 968, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "View.MemoryView":967 + * + * cdef convert_item_to_object(self, char *itemp): + * if self.to_object_func != NULL: # <<<<<<<<<<<<<< + * return self.to_object_func(itemp) + * else: + */ + } + + /* "View.MemoryView":970 + * return self.to_object_func(itemp) + * else: + * return memoryview.convert_item_to_object(self, itemp) # <<<<<<<<<<<<<< + * + * cdef assign_item_from_object(self, char *itemp, object value): + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = __pyx_memoryview_convert_item_to_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(1, 970, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + } + + /* "View.MemoryView":966 + * __PYX_XCLEAR_MEMVIEW(&self.from_slice, 1) + * + * cdef convert_item_to_object(self, char *itemp): # <<<<<<<<<<<<<< + * if self.to_object_func != NULL: + * return self.to_object_func(itemp) + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.convert_item_to_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":972 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + +static PyObject *__pyx_memoryviewslice_assign_item_from_object(struct __pyx_memoryviewslice_obj *__pyx_v_self, char *__pyx_v_itemp, PyObject *__pyx_v_value) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("assign_item_from_object", 0); + + /* "View.MemoryView":973 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + __pyx_t_1 = (__pyx_v_self->to_dtype_func != NULL); + if (__pyx_t_1) { + + /* "View.MemoryView":974 + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) # <<<<<<<<<<<<<< + * else: + * memoryview.assign_item_from_object(self, itemp, value) + */ + __pyx_t_2 = __pyx_v_self->to_dtype_func(__pyx_v_itemp, __pyx_v_value); if (unlikely(__pyx_t_2 == ((int)0))) __PYX_ERR(1, 974, __pyx_L1_error) + + /* "View.MemoryView":973 + * + * cdef assign_item_from_object(self, char *itemp, object value): + * if self.to_dtype_func != NULL: # <<<<<<<<<<<<<< + * self.to_dtype_func(itemp, value) + * else: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":976 + * self.to_dtype_func(itemp, value) + * else: + * memoryview.assign_item_from_object(self, itemp, value) # <<<<<<<<<<<<<< + * + * cdef _get_base(self): + */ + /*else*/ { + __pyx_t_3 = __pyx_memoryview_assign_item_from_object(((struct __pyx_memoryview_obj *)__pyx_v_self), __pyx_v_itemp, __pyx_v_value); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 976, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_L3:; + + /* "View.MemoryView":972 + * return memoryview.convert_item_to_object(self, itemp) + * + * cdef assign_item_from_object(self, char *itemp, object value): # <<<<<<<<<<<<<< + * if self.to_dtype_func != NULL: + * self.to_dtype_func(itemp, value) + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.assign_item_from_object", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":978 + * memoryview.assign_item_from_object(self, itemp, value) + * + * cdef _get_base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + +static 
PyObject *__pyx_memoryviewslice__get_base(struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("_get_base", 0); + + /* "View.MemoryView":979 + * + * cdef _get_base(self): + * return self.from_object # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_self->from_object); + __pyx_r = __pyx_v_self->from_object; + goto __pyx_L0; + + /* "View.MemoryView":978 + * memoryview.assign_item_from_object(self, itemp, value) + * + * cdef _get_base(self): # <<<<<<<<<<<<<< + * return self.from_object + * + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_1__reduce_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__reduce_cython__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 1, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + if (unlikely(__pyx_nargs > 0)) { + __Pyx_RaiseArgtupleInvalid("__reduce_cython__", 1, 0, 0, __pyx_nargs); return NULL;} + if (unlikely(__pyx_kwds) && __Pyx_NumKwargs_FASTCALL(__pyx_kwds) && unlikely(!__Pyx_CheckKeywordStrings(__pyx_kwds, "__reduce_cython__", 0))) return NULL; + goto __pyx_L4_argument_unpacking_done; + goto __pyx_L3_error; + __pyx_L3_error:; + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf___pyx_memoryviewslice___reduce_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self)); + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryviewslice___reduce_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__reduce_cython__", 0); + + /* "(tree fragment)":2 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< + * def __setstate_cython__(self, __pyx_state): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + __Pyx_Raise(__pyx_builtin_TypeError, 
__pyx_kp_s_no_default___reduce___due_to_non, 0, 0); + __PYX_ERR(1, 2, __pyx_L1_error) + + /* "(tree fragment)":1 + * def __reduce_cython__(self): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__reduce_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + +/* Python wrapper */ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyObject *__pyx_pw___pyx_memoryviewslice_3__setstate_cython__(PyObject *__pyx_v_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + CYTHON_UNUSED PyObject *__pyx_v___pyx_state = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[1] = {0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__setstate_cython__ (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 3, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_state,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 3, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__setstate_cython__") < 0)) __PYX_ERR(1, 3, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 1)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + } + __pyx_v___pyx_state = values[0]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__setstate_cython__", 1, 1, 1, __pyx_nargs); __PYX_ERR(1, 3, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + 
__Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf___pyx_memoryviewslice_2__setstate_cython__(((struct __pyx_memoryviewslice_obj *)__pyx_v_self), __pyx_v___pyx_state); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf___pyx_memoryviewslice_2__setstate_cython__(CYTHON_UNUSED struct __pyx_memoryviewslice_obj *__pyx_v_self, CYTHON_UNUSED PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__setstate_cython__", 0); + + /* "(tree fragment)":4 + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" # <<<<<<<<<<<<<< + */ + __Pyx_Raise(__pyx_builtin_TypeError, __pyx_kp_s_no_default___reduce___due_to_non, 0, 0); + __PYX_ERR(1, 4, __pyx_L1_error) + + /* "(tree fragment)":3 + * def __reduce_cython__(self): + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + * def __setstate_cython__(self, __pyx_state): # <<<<<<<<<<<<<< + * raise TypeError, "no default __reduce__ due to non-trivial __cinit__" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView._memoryviewslice.__setstate_cython__", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + +static PyObject *__pyx_memoryview_fromslice(__Pyx_memviewslice __pyx_v_memviewslice, int __pyx_v_ndim, PyObject *(*__pyx_v_to_object_func)(char *), int (*__pyx_v_to_dtype_func)(char *, PyObject *), int __pyx_v_dtype_is_object) { + struct __pyx_memoryviewslice_obj *__pyx_v_result = 0; + Py_ssize_t __pyx_v_suboffset; + PyObject *__pyx_v_length = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + __Pyx_TypeInfo *__pyx_t_4; + Py_buffer __pyx_t_5; + Py_ssize_t *__pyx_t_6; + Py_ssize_t *__pyx_t_7; + Py_ssize_t *__pyx_t_8; + Py_ssize_t __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_fromslice", 0); + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + */ + __pyx_t_1 = (((PyObject *)__pyx_v_memviewslice.memview) == Py_None); + if (__pyx_t_1) { + + /* "View.MemoryView":1008 + * + * if memviewslice.memview == Py_None: + * return None # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "View.MemoryView":1007 + * cdef _memoryviewslice result + * + * if memviewslice.memview == Py_None: # <<<<<<<<<<<<<< + * return None + * + 
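* (The rest of memoryview_fromslice allocates the _memoryviewslice, stores the C slice, re-points the Py_buffer view at the slice's data, shape and strides, recomputes view.len, and carries over the to_object/to_dtype converters.) +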
*/ + } + + /* "View.MemoryView":1013 + * + * + * result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object) # <<<<<<<<<<<<<< + * + * result.from_slice = memviewslice + */ + __pyx_t_2 = __Pyx_PyBool_FromLong(__pyx_v_dtype_is_object); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyTuple_New(3); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_INCREF(Py_None); + __Pyx_GIVEREF(Py_None); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 0, Py_None)) __PYX_ERR(1, 1013, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_0); + __Pyx_GIVEREF(__pyx_int_0); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 1, __pyx_int_0)) __PYX_ERR(1, 1013, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_3, 2, __pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error); + __pyx_t_2 = 0; + __pyx_t_2 = ((PyObject *)__pyx_tp_new__memoryviewslice(((PyTypeObject *)__pyx_memoryviewslice_type), __pyx_t_3, NULL)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1013, __pyx_L1_error) + __Pyx_GOTREF((PyObject *)__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1015 + * result = _memoryviewslice.__new__(_memoryviewslice, None, 0, dtype_is_object) + * + * result.from_slice = memviewslice # <<<<<<<<<<<<<< + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + */ + __pyx_v_result->from_slice = __pyx_v_memviewslice; + + /* "View.MemoryView":1016 + * + * result.from_slice = memviewslice + * __PYX_INC_MEMVIEW(&memviewslice, 1) # <<<<<<<<<<<<<< + * + * result.from_object = ( memviewslice.memview)._get_base() + */ + __PYX_INC_MEMVIEW((&__pyx_v_memviewslice), 1); + + /* "View.MemoryView":1018 + * __PYX_INC_MEMVIEW(&memviewslice, 1) + * + * result.from_object = ( memviewslice.memview)._get_base() # <<<<<<<<<<<<<< + * result.typeinfo = memviewslice.memview.typeinfo + * + */ + __pyx_t_2 = ((struct __pyx_vtabstruct_memoryview *)((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->__pyx_vtab)->_get_base(((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1018, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_2); + __Pyx_GOTREF(__pyx_v_result->from_object); + __Pyx_DECREF(__pyx_v_result->from_object); + __pyx_v_result->from_object = __pyx_t_2; + __pyx_t_2 = 0; + + /* "View.MemoryView":1019 + * + * result.from_object = ( memviewslice.memview)._get_base() + * result.typeinfo = memviewslice.memview.typeinfo # <<<<<<<<<<<<<< + * + * result.view = memviewslice.memview.view + */ + __pyx_t_4 = __pyx_v_memviewslice.memview->typeinfo; + __pyx_v_result->__pyx_base.typeinfo = __pyx_t_4; + + /* "View.MemoryView":1021 + * result.typeinfo = memviewslice.memview.typeinfo + * + * result.view = memviewslice.memview.view # <<<<<<<<<<<<<< + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + */ + __pyx_t_5 = __pyx_v_memviewslice.memview->view; + __pyx_v_result->__pyx_base.view = __pyx_t_5; + + /* "View.MemoryView":1022 + * + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data # <<<<<<<<<<<<<< + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + */ + __pyx_v_result->__pyx_base.view.buf = ((void *)__pyx_v_memviewslice.data); + + /* "View.MemoryView":1023 + * result.view = memviewslice.memview.view + * result.view.buf = memviewslice.data + * result.view.ndim = ndim # <<<<<<<<<<<<<< + * (<__pyx_buffer *> 
&result.view).obj = Py_None + * Py_INCREF(Py_None) + */ + __pyx_v_result->__pyx_base.view.ndim = __pyx_v_ndim; + + /* "View.MemoryView":1024 + * result.view.buf = memviewslice.data + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None # <<<<<<<<<<<<<< + * Py_INCREF(Py_None) + * + */ + ((Py_buffer *)(&__pyx_v_result->__pyx_base.view))->obj = Py_None; + + /* "View.MemoryView":1025 + * result.view.ndim = ndim + * (<__pyx_buffer *> &result.view).obj = Py_None + * Py_INCREF(Py_None) # <<<<<<<<<<<<<< + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + */ + Py_INCREF(Py_None); + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + __pyx_t_1 = ((((struct __pyx_memoryview_obj *)__pyx_v_memviewslice.memview)->flags & PyBUF_WRITABLE) != 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1028 + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: + * result.flags = PyBUF_RECORDS # <<<<<<<<<<<<<< + * else: + * result.flags = PyBUF_RECORDS_RO + */ + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS; + + /* "View.MemoryView":1027 + * Py_INCREF(Py_None) + * + * if (memviewslice.memview).flags & PyBUF_WRITABLE: # <<<<<<<<<<<<<< + * result.flags = PyBUF_RECORDS + * else: + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1030 + * result.flags = PyBUF_RECORDS + * else: + * result.flags = PyBUF_RECORDS_RO # <<<<<<<<<<<<<< + * + * result.view.shape = result.from_slice.shape + */ + /*else*/ { + __pyx_v_result->__pyx_base.flags = PyBUF_RECORDS_RO; + } + __pyx_L4:; + + /* "View.MemoryView":1032 + * result.flags = PyBUF_RECORDS_RO + * + * result.view.shape = result.from_slice.shape # <<<<<<<<<<<<<< + * result.view.strides = result.from_slice.strides + * + */ + __pyx_v_result->__pyx_base.view.shape = ((Py_ssize_t *)__pyx_v_result->from_slice.shape); + + /* "View.MemoryView":1033 + * + * result.view.shape = result.from_slice.shape + * result.view.strides = result.from_slice.strides # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_result->__pyx_base.view.strides = ((Py_ssize_t *)__pyx_v_result->from_slice.strides); + + /* "View.MemoryView":1036 + * + * + * result.view.suboffsets = NULL # <<<<<<<<<<<<<< + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + */ + __pyx_v_result->__pyx_base.view.suboffsets = NULL; + + /* "View.MemoryView":1037 + * + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: # <<<<<<<<<<<<<< + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets + */ + __pyx_t_7 = (__pyx_v_result->from_slice.suboffsets + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->from_slice.suboffsets; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_v_suboffset = (__pyx_t_6[0]); + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + __pyx_t_1 = (__pyx_v_suboffset >= 0); + if (__pyx_t_1) { + + /* "View.MemoryView":1039 + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: + * result.view.suboffsets = result.from_slice.suboffsets # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_result->__pyx_base.view.suboffsets = ((Py_ssize_t *)__pyx_v_result->from_slice.suboffsets); + + /* "View.MemoryView":1040 + * if suboffset >= 0: + * result.view.suboffsets = 
result.from_slice.suboffsets + * break # <<<<<<<<<<<<<< + * + * result.view.len = result.view.itemsize + */ + goto __pyx_L6_break; + + /* "View.MemoryView":1038 + * result.view.suboffsets = NULL + * for suboffset in result.from_slice.suboffsets[:ndim]: + * if suboffset >= 0: # <<<<<<<<<<<<<< + * result.view.suboffsets = result.from_slice.suboffsets + * break + */ + } + } + __pyx_L6_break:; + + /* "View.MemoryView":1042 + * break + * + * result.view.len = result.view.itemsize # <<<<<<<<<<<<<< + * for length in result.view.shape[:ndim]: + * result.view.len *= length + */ + __pyx_t_9 = __pyx_v_result->__pyx_base.view.itemsize; + __pyx_v_result->__pyx_base.view.len = __pyx_t_9; + + /* "View.MemoryView":1043 + * + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: # <<<<<<<<<<<<<< + * result.view.len *= length + * + */ + __pyx_t_7 = (__pyx_v_result->__pyx_base.view.shape + __pyx_v_ndim); + for (__pyx_t_8 = __pyx_v_result->__pyx_base.view.shape; __pyx_t_8 < __pyx_t_7; __pyx_t_8++) { + __pyx_t_6 = __pyx_t_8; + __pyx_t_2 = PyInt_FromSsize_t((__pyx_t_6[0])); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1043, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_XDECREF_SET(__pyx_v_length, __pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1044 + * result.view.len = result.view.itemsize + * for length in result.view.shape[:ndim]: + * result.view.len *= length # <<<<<<<<<<<<<< + * + * result.to_object_func = to_object_func + */ + __pyx_t_2 = PyInt_FromSsize_t(__pyx_v_result->__pyx_base.view.len); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_InPlaceMultiply(__pyx_t_2, __pyx_v_length); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_9 = __Pyx_PyIndex_AsSsize_t(__pyx_t_3); if (unlikely((__pyx_t_9 == (Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(1, 1044, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_result->__pyx_base.view.len = __pyx_t_9; + } + + /* "View.MemoryView":1046 + * result.view.len *= length + * + * result.to_object_func = to_object_func # <<<<<<<<<<<<<< + * result.to_dtype_func = to_dtype_func + * + */ + __pyx_v_result->to_object_func = __pyx_v_to_object_func; + + /* "View.MemoryView":1047 + * + * result.to_object_func = to_object_func + * result.to_dtype_func = to_dtype_func # <<<<<<<<<<<<<< + * + * return result + */ + __pyx_v_result->to_dtype_func = __pyx_v_to_dtype_func; + + /* "View.MemoryView":1049 + * result.to_dtype_func = to_dtype_func + * + * return result # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF((PyObject *)__pyx_v_result); + __pyx_r = ((PyObject *)__pyx_v_result); + goto __pyx_L0; + + /* "View.MemoryView":999 + * + * @cname('__pyx_memoryview_fromslice') + * cdef memoryview_fromslice(__Pyx_memviewslice memviewslice, # <<<<<<<<<<<<<< + * int ndim, + * object (*to_object_func)(char *), + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_AddTraceback("View.MemoryView.memoryview_fromslice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_result); + __Pyx_XDECREF(__pyx_v_length); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice 
*get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + +static __Pyx_memviewslice *__pyx_memoryview_get_slice_from_memoryview(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_mslice) { + struct __pyx_memoryviewslice_obj *__pyx_v_obj = 0; + __Pyx_memviewslice *__pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("get_slice_from_memview", 0); + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + if (__pyx_t_1) { + + /* "View.MemoryView":1056 + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): + * obj = memview # <<<<<<<<<<<<<< + * return &obj.from_slice + * else: + */ + if (!(likely(((((PyObject *)__pyx_v_memview)) == Py_None) || likely(__Pyx_TypeTest(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type))))) __PYX_ERR(1, 1056, __pyx_L1_error) + __pyx_t_2 = ((PyObject *)__pyx_v_memview); + __Pyx_INCREF(__pyx_t_2); + __pyx_v_obj = ((struct __pyx_memoryviewslice_obj *)__pyx_t_2); + __pyx_t_2 = 0; + + /* "View.MemoryView":1057 + * if isinstance(memview, _memoryviewslice): + * obj = memview + * return &obj.from_slice # <<<<<<<<<<<<<< + * else: + * slice_copy(memview, mslice) + */ + __pyx_r = (&__pyx_v_obj->from_slice); + goto __pyx_L0; + + /* "View.MemoryView":1055 + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * obj = memview + * return &obj.from_slice + */ + } + + /* "View.MemoryView":1059 + * return &obj.from_slice + * else: + * slice_copy(memview, mslice) # <<<<<<<<<<<<<< + * return mslice + * + */ + /*else*/ { + __pyx_memoryview_slice_copy(__pyx_v_memview, __pyx_v_mslice); + + /* "View.MemoryView":1060 + * else: + * slice_copy(memview, mslice) + * return mslice # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_slice_copy') + */ + __pyx_r = __pyx_v_mslice; + goto __pyx_L0; + } + + /* "View.MemoryView":1052 + * + * @cname('__pyx_memoryview_get_slice_from_memoryview') + * cdef __Pyx_memviewslice *get_slice_from_memview(memoryview memview, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *mslice) except NULL: + * cdef _memoryviewslice obj + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView.get_slice_from_memview", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF((PyObject *)__pyx_v_obj); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst) noexcept: # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + +static void __pyx_memoryview_slice_copy(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_dst) { + int __pyx_v_dim; + Py_ssize_t *__pyx_v_shape; + Py_ssize_t *__pyx_v_strides; + Py_ssize_t *__pyx_v_suboffsets; + __Pyx_RefNannyDeclarations + Py_ssize_t *__pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + Py_ssize_t __pyx_t_5; + int __pyx_t_6; + 
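/* slice_copy flattens the memoryview's Py_buffer into the C-level __Pyx_memviewslice: it copies the data pointer, then the per-dimension shape, strides and suboffsets, writing -1 where the buffer exposes no suboffsets. */ +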
__Pyx_RefNannySetupContext("slice_copy", 0); + + /* "View.MemoryView":1067 + * cdef (Py_ssize_t*) shape, strides, suboffsets + * + * shape = memview.view.shape # <<<<<<<<<<<<<< + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets + */ + __pyx_t_1 = __pyx_v_memview->view.shape; + __pyx_v_shape = __pyx_t_1; + + /* "View.MemoryView":1068 + * + * shape = memview.view.shape + * strides = memview.view.strides # <<<<<<<<<<<<<< + * suboffsets = memview.view.suboffsets + * + */ + __pyx_t_1 = __pyx_v_memview->view.strides; + __pyx_v_strides = __pyx_t_1; + + /* "View.MemoryView":1069 + * shape = memview.view.shape + * strides = memview.view.strides + * suboffsets = memview.view.suboffsets # <<<<<<<<<<<<<< + * + * dst.memview = <__pyx_memoryview *> memview + */ + __pyx_t_1 = __pyx_v_memview->view.suboffsets; + __pyx_v_suboffsets = __pyx_t_1; + + /* "View.MemoryView":1071 + * suboffsets = memview.view.suboffsets + * + * dst.memview = <__pyx_memoryview *> memview # <<<<<<<<<<<<<< + * dst.data = memview.view.buf + * + */ + __pyx_v_dst->memview = ((struct __pyx_memoryview_obj *)__pyx_v_memview); + + /* "View.MemoryView":1072 + * + * dst.memview = <__pyx_memoryview *> memview + * dst.data = memview.view.buf # <<<<<<<<<<<<<< + * + * for dim in range(memview.view.ndim): + */ + __pyx_v_dst->data = ((char *)__pyx_v_memview->view.buf); + + /* "View.MemoryView":1074 + * dst.data = memview.view.buf + * + * for dim in range(memview.view.ndim): # <<<<<<<<<<<<<< + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + */ + __pyx_t_2 = __pyx_v_memview->view.ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_dim = __pyx_t_4; + + /* "View.MemoryView":1075 + * + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] # <<<<<<<<<<<<<< + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + */ + (__pyx_v_dst->shape[__pyx_v_dim]) = (__pyx_v_shape[__pyx_v_dim]); + + /* "View.MemoryView":1076 + * for dim in range(memview.view.ndim): + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] # <<<<<<<<<<<<<< + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 + * + */ + (__pyx_v_dst->strides[__pyx_v_dim]) = (__pyx_v_strides[__pyx_v_dim]); + + /* "View.MemoryView":1077 + * dst.shape[dim] = shape[dim] + * dst.strides[dim] = strides[dim] + * dst.suboffsets[dim] = suboffsets[dim] if suboffsets else -1 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object') + */ + __pyx_t_6 = (__pyx_v_suboffsets != 0); + if (__pyx_t_6) { + __pyx_t_5 = (__pyx_v_suboffsets[__pyx_v_dim]); + } else { + __pyx_t_5 = -1L; + } + (__pyx_v_dst->suboffsets[__pyx_v_dim]) = __pyx_t_5; + } + + /* "View.MemoryView":1063 + * + * @cname('__pyx_memoryview_slice_copy') + * cdef void slice_copy(memoryview memview, __Pyx_memviewslice *dst) noexcept: # <<<<<<<<<<<<<< + * cdef int dim + * cdef (Py_ssize_t*) shape, strides, suboffsets + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + +static PyObject *__pyx_memoryview_copy_object(struct __pyx_memoryview_obj *__pyx_v_memview) { + __Pyx_memviewslice __pyx_v_memviewslice; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = 
NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy", 0); + + /* "View.MemoryView":1083 + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) # <<<<<<<<<<<<<< + * return memoryview_copy_from_slice(memview, &memviewslice) + * + */ + __pyx_memoryview_slice_copy(__pyx_v_memview, (&__pyx_v_memviewslice)); + + /* "View.MemoryView":1084 + * cdef __Pyx_memviewslice memviewslice + * slice_copy(memview, &memviewslice) + * return memoryview_copy_from_slice(memview, &memviewslice) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_object_from_slice') + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = __pyx_memoryview_copy_object_from_slice(__pyx_v_memview, (&__pyx_v_memviewslice)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1084, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1080 + * + * @cname('__pyx_memoryview_copy_object') + * cdef memoryview_copy(memoryview memview): # <<<<<<<<<<<<<< + * "Create a new memoryview object" + * cdef __Pyx_memviewslice memviewslice + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview object and slice. + */ + +static PyObject *__pyx_memoryview_copy_object_from_slice(struct __pyx_memoryview_obj *__pyx_v_memview, __Pyx_memviewslice *__pyx_v_memviewslice) { + PyObject *(*__pyx_v_to_object_func)(char *); + int (*__pyx_v_to_dtype_func)(char *, PyObject *); + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + PyObject *(*__pyx_t_2)(char *); + int (*__pyx_t_3)(char *, PyObject *); + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("memoryview_copy_from_slice", 0); + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, _memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + __pyx_t_1 = __Pyx_TypeCheck(((PyObject *)__pyx_v_memview), __pyx_memoryviewslice_type); + if (__pyx_t_1) { + + /* "View.MemoryView":1095 + * + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func # <<<<<<<<<<<<<< + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + */ + __pyx_t_2 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_object_func; + __pyx_v_to_object_func = __pyx_t_2; + + /* "View.MemoryView":1096 + * if isinstance(memview, _memoryviewslice): + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func # <<<<<<<<<<<<<< + * else: + * to_object_func = NULL + */ + __pyx_t_3 = ((struct __pyx_memoryviewslice_obj *)__pyx_v_memview)->to_dtype_func; + __pyx_v_to_dtype_func = __pyx_t_3; + + /* "View.MemoryView":1094 + * cdef int (*to_dtype_func)(char *, object) except 0 + * + * if isinstance(memview, 
_memoryviewslice): # <<<<<<<<<<<<<< + * to_object_func = (<_memoryviewslice> memview).to_object_func + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1098 + * to_dtype_func = (<_memoryviewslice> memview).to_dtype_func + * else: + * to_object_func = NULL # <<<<<<<<<<<<<< + * to_dtype_func = NULL + * + */ + /*else*/ { + __pyx_v_to_object_func = NULL; + + /* "View.MemoryView":1099 + * else: + * to_object_func = NULL + * to_dtype_func = NULL # <<<<<<<<<<<<<< + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + */ + __pyx_v_to_dtype_func = NULL; + } + __pyx_L3:; + + /* "View.MemoryView":1101 + * to_dtype_func = NULL + * + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, # <<<<<<<<<<<<<< + * to_object_func, to_dtype_func, + * memview.dtype_is_object) + */ + __Pyx_XDECREF(__pyx_r); + + /* "View.MemoryView":1103 + * return memoryview_fromslice(memviewslice[0], memview.view.ndim, + * to_object_func, to_dtype_func, + * memview.dtype_is_object) # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_4 = __pyx_memoryview_fromslice((__pyx_v_memviewslice[0]), __pyx_v_memview->view.ndim, __pyx_v_to_object_func, __pyx_v_to_dtype_func, __pyx_v_memview->dtype_is_object); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "View.MemoryView":1087 + * + * @cname('__pyx_memoryview_copy_object_from_slice') + * cdef memoryview_copy_from_slice(memoryview memview, __Pyx_memviewslice *memviewslice): # <<<<<<<<<<<<<< + * """ + * Create a new memoryview object from a given memoryview object and slice. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_from_slice", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil: # <<<<<<<<<<<<<< + * return -arg if arg < 0 else arg + * + */ + +static Py_ssize_t abs_py_ssize_t(Py_ssize_t __pyx_v_arg) { + Py_ssize_t __pyx_r; + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + + /* "View.MemoryView":1110 + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil: + * return -arg if arg < 0 else arg # <<<<<<<<<<<<<< + * + * @cname('__pyx_get_best_slice_order') + */ + __pyx_t_2 = (__pyx_v_arg < 0); + if (__pyx_t_2) { + __pyx_t_1 = (-__pyx_v_arg); + } else { + __pyx_t_1 = __pyx_v_arg; + } + __pyx_r = __pyx_t_1; + goto __pyx_L0; + + /* "View.MemoryView":1109 + * + * + * cdef Py_ssize_t abs_py_ssize_t(Py_ssize_t arg) noexcept nogil: # <<<<<<<<<<<<<< + * return -arg if arg < 0 else arg + * + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1113 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) noexcept nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. 
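+ * (Implementation note: c_stride is the stride of the innermost dimension with extent > 1 and f_stride that of the outermost; 'C' is returned when abs(c_stride) <= abs(f_stride), so ties favour C order.)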
+ */ + +static char __pyx_get_best_slice_order(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim) { + int __pyx_v_i; + Py_ssize_t __pyx_v_c_stride; + Py_ssize_t __pyx_v_f_stride; + char __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1118 + * """ + * cdef int i + * cdef Py_ssize_t c_stride = 0 # <<<<<<<<<<<<<< + * cdef Py_ssize_t f_stride = 0 + * + */ + __pyx_v_c_stride = 0; + + /* "View.MemoryView":1119 + * cdef int i + * cdef Py_ssize_t c_stride = 0 + * cdef Py_ssize_t f_stride = 0 # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_f_stride = 0; + + /* "View.MemoryView":1121 + * cdef Py_ssize_t f_stride = 0 + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1122 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = ((__pyx_v_mslice->shape[__pyx_v_i]) > 1); + if (__pyx_t_2) { + + /* "View.MemoryView":1123 + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_c_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1124 + * if mslice.shape[i] > 1: + * c_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + goto __pyx_L4_break; + + /* "View.MemoryView":1122 + * + * for i in range(ndim - 1, -1, -1): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * c_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L4_break:; + + /* "View.MemoryView":1126 + * break + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + */ + __pyx_t_1 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_1; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1127 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + __pyx_t_2 = ((__pyx_v_mslice->shape[__pyx_v_i]) > 1); + if (__pyx_t_2) { + + /* "View.MemoryView":1128 + * for i in range(ndim): + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] # <<<<<<<<<<<<<< + * break + * + */ + __pyx_v_f_stride = (__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1129 + * if mslice.shape[i] > 1: + * f_stride = mslice.strides[i] + * break # <<<<<<<<<<<<<< + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + */ + goto __pyx_L7_break; + + /* "View.MemoryView":1127 + * + * for i in range(ndim): + * if mslice.shape[i] > 1: # <<<<<<<<<<<<<< + * f_stride = mslice.strides[i] + * break + */ + } + } + __pyx_L7_break:; + + /* "View.MemoryView":1131 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + __pyx_t_2 = (abs_py_ssize_t(__pyx_v_c_stride) <= abs_py_ssize_t(__pyx_v_f_stride)); + if (__pyx_t_2) { + + /* "View.MemoryView":1132 + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): + * return 'C' # <<<<<<<<<<<<<< + * else: + * return 'F' + */ + __pyx_r = 'C'; + goto __pyx_L0; + + /* "View.MemoryView":1131 + * break + * + * if abs_py_ssize_t(c_stride) <= abs_py_ssize_t(f_stride): # <<<<<<<<<<<<<< + * return 'C' + * else: + */ + } + + /* "View.MemoryView":1134 + * return 'C' + * else: + * return 'F' # 
<<<<<<<<<<<<<< + * + * @cython.cdivision(True) + */ + /*else*/ { + __pyx_r = 'F'; + goto __pyx_L0; + } + + /* "View.MemoryView":1113 + * + * @cname('__pyx_get_best_slice_order') + * cdef char get_best_order(__Pyx_memviewslice *mslice, int ndim) noexcept nogil: # <<<<<<<<<<<<<< + * """ + * Figure out the best memory access order for a given slice. + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1137 + * + * @cython.cdivision(True) + * cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + +static void _copy_strided_to_strided(char *__pyx_v_src_data, Py_ssize_t *__pyx_v_src_strides, char *__pyx_v_dst_data, Py_ssize_t *__pyx_v_dst_strides, Py_ssize_t *__pyx_v_src_shape, Py_ssize_t *__pyx_v_dst_shape, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + CYTHON_UNUSED Py_ssize_t __pyx_v_src_extent; + Py_ssize_t __pyx_v_dst_extent; + Py_ssize_t __pyx_v_src_stride; + Py_ssize_t __pyx_v_dst_stride; + int __pyx_t_1; + int __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + Py_ssize_t __pyx_t_5; + + /* "View.MemoryView":1144 + * + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + */ + __pyx_v_src_extent = (__pyx_v_src_shape[0]); + + /* "View.MemoryView":1145 + * cdef Py_ssize_t i + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] + */ + __pyx_v_dst_extent = (__pyx_v_dst_shape[0]); + + /* "View.MemoryView":1146 + * cdef Py_ssize_t src_extent = src_shape[0] + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + */ + __pyx_v_src_stride = (__pyx_v_src_strides[0]); + + /* "View.MemoryView":1147 + * cdef Py_ssize_t dst_extent = dst_shape[0] + * cdef Py_ssize_t src_stride = src_strides[0] + * cdef Py_ssize_t dst_stride = dst_strides[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_dst_stride = (__pyx_v_dst_strides[0]); + + /* "View.MemoryView":1149 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + __pyx_t_1 = (__pyx_v_ndim == 1); + if (__pyx_t_1) { + + /* "View.MemoryView":1150 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + __pyx_t_2 = (__pyx_v_src_stride > 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + __pyx_t_2 = (__pyx_v_dst_stride > 0); + if (__pyx_t_2) { + } else { + __pyx_t_1 = __pyx_t_2; + goto __pyx_L5_bool_binop_done; + } + + /* "View.MemoryView":1151 + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + */ + __pyx_t_2 = (((size_t)__pyx_v_src_stride) == __pyx_v_itemsize); + if (__pyx_t_2) { + __pyx_t_2 = (__pyx_v_itemsize == ((size_t)__pyx_v_dst_stride)); + } + __pyx_t_1 = __pyx_t_2; + __pyx_L5_bool_binop_done:; + + /* 
"View.MemoryView":1150 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + if (__pyx_t_1) { + + /* "View.MemoryView":1152 + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, (__pyx_v_itemsize * __pyx_v_dst_extent))); + + /* "View.MemoryView":1150 + * + * if ndim == 1: + * if (src_stride > 0 and dst_stride > 0 and # <<<<<<<<<<<<<< + * src_stride == itemsize == dst_stride): + * memcpy(dst_data, src_data, itemsize * dst_extent) + */ + goto __pyx_L4; + } + + /* "View.MemoryView":1154 + * memcpy(dst_data, src_data, itemsize * dst_extent) + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + */ + /*else*/ { + __pyx_t_3 = __pyx_v_dst_extent; + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "View.MemoryView":1155 + * else: + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) # <<<<<<<<<<<<<< + * src_data += src_stride + * dst_data += dst_stride + */ + (void)(memcpy(__pyx_v_dst_data, __pyx_v_src_data, __pyx_v_itemsize)); + + /* "View.MemoryView":1156 + * for i in range(dst_extent): + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * else: + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1157 + * memcpy(dst_data, src_data, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * else: + * for i in range(dst_extent): + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L4:; + + /* "View.MemoryView":1149 + * cdef Py_ssize_t dst_stride = dst_strides[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * if (src_stride > 0 and dst_stride > 0 and + * src_stride == itemsize == dst_stride): + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1159 + * dst_data += dst_stride + * else: + * for i in range(dst_extent): # <<<<<<<<<<<<<< + * _copy_strided_to_strided(src_data, src_strides + 1, + * dst_data, dst_strides + 1, + */ + /*else*/ { + __pyx_t_3 = __pyx_v_dst_extent; + __pyx_t_4 = __pyx_t_3; + for (__pyx_t_5 = 0; __pyx_t_5 < __pyx_t_4; __pyx_t_5+=1) { + __pyx_v_i = __pyx_t_5; + + /* "View.MemoryView":1160 + * else: + * for i in range(dst_extent): + * _copy_strided_to_strided(src_data, src_strides + 1, # <<<<<<<<<<<<<< + * dst_data, dst_strides + 1, + * src_shape + 1, dst_shape + 1, + */ + _copy_strided_to_strided(__pyx_v_src_data, (__pyx_v_src_strides + 1), __pyx_v_dst_data, (__pyx_v_dst_strides + 1), (__pyx_v_src_shape + 1), (__pyx_v_dst_shape + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize); + + /* "View.MemoryView":1164 + * src_shape + 1, dst_shape + 1, + * ndim - 1, itemsize) + * src_data += src_stride # <<<<<<<<<<<<<< + * dst_data += dst_stride + * + */ + __pyx_v_src_data = (__pyx_v_src_data + __pyx_v_src_stride); + + /* "View.MemoryView":1165 + * ndim - 1, itemsize) + * src_data += src_stride + * dst_data += dst_stride # <<<<<<<<<<<<<< + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, + */ + __pyx_v_dst_data = (__pyx_v_dst_data + __pyx_v_dst_stride); + } + } + __pyx_L3:; + + /* "View.MemoryView":1137 + * + * @cython.cdivision(True) + * 
cdef void _copy_strided_to_strided(char *src_data, Py_ssize_t *src_strides, # <<<<<<<<<<<<<< + * char *dst_data, Py_ssize_t *dst_strides, + * Py_ssize_t *src_shape, Py_ssize_t *dst_shape, + */ + + /* function exit code */ +} + +/* "View.MemoryView":1167 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) noexcept nogil: + */ + +static void copy_strided_to_strided(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize) { + + /* "View.MemoryView":1170 + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) noexcept nogil: + * _copy_strided_to_strided(src.data, src.strides, dst.data, dst.strides, # <<<<<<<<<<<<<< + * src.shape, dst.shape, ndim, itemsize) + * + */ + _copy_strided_to_strided(__pyx_v_src->data, __pyx_v_src->strides, __pyx_v_dst->data, __pyx_v_dst->strides, __pyx_v_src->shape, __pyx_v_dst->shape, __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1167 + * dst_data += dst_stride + * + * cdef void copy_strided_to_strided(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *dst, + * int ndim, size_t itemsize) noexcept nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1174 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) noexcept nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + +static Py_ssize_t __pyx_memoryview_slice_get_size(__Pyx_memviewslice *__pyx_v_src, int __pyx_v_ndim) { + Py_ssize_t __pyx_v_shape; + Py_ssize_t __pyx_v_size; + Py_ssize_t __pyx_r; + Py_ssize_t __pyx_t_1; + Py_ssize_t *__pyx_t_2; + Py_ssize_t *__pyx_t_3; + Py_ssize_t *__pyx_t_4; + + /* "View.MemoryView":1176 + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) noexcept nogil: + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize # <<<<<<<<<<<<<< + * + * for shape in src.shape[:ndim]: + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_size = __pyx_t_1; + + /* "View.MemoryView":1178 + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + * + * for shape in src.shape[:ndim]: # <<<<<<<<<<<<<< + * size *= shape + * + */ + __pyx_t_3 = (__pyx_v_src->shape + __pyx_v_ndim); + for (__pyx_t_4 = __pyx_v_src->shape; __pyx_t_4 < __pyx_t_3; __pyx_t_4++) { + __pyx_t_2 = __pyx_t_4; + __pyx_v_shape = (__pyx_t_2[0]); + + /* "View.MemoryView":1179 + * + * for shape in src.shape[:ndim]: + * size *= shape # <<<<<<<<<<<<<< + * + * return size + */ + __pyx_v_size = (__pyx_v_size * __pyx_v_shape); + } + + /* "View.MemoryView":1181 + * size *= shape + * + * return size # <<<<<<<<<<<<<< + * + * @cname('__pyx_fill_contig_strides_array') + */ + __pyx_r = __pyx_v_size; + goto __pyx_L0; + + /* "View.MemoryView":1174 + * + * @cname('__pyx_memoryview_slice_get_size') + * cdef Py_ssize_t slice_get_size(__Pyx_memviewslice *src, int ndim) noexcept nogil: # <<<<<<<<<<<<<< + * "Return the size of the memory occupied by the slice in number of bytes" + * cdef Py_ssize_t shape, size = src.memview.view.itemsize + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1184 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * Py_ssize_t 
*shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) noexcept nogil: + */ + +static Py_ssize_t __pyx_fill_contig_strides_array(Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, Py_ssize_t __pyx_v_stride, int __pyx_v_ndim, char __pyx_v_order) { + int __pyx_v_idx; + Py_ssize_t __pyx_r; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + + /* "View.MemoryView":1193 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + __pyx_t_1 = (__pyx_v_order == 'F'); + if (__pyx_t_1) { + + /* "View.MemoryView":1194 + * + * if order == 'F': + * for idx in range(ndim): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + __pyx_t_2 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_idx = __pyx_t_4; + + /* "View.MemoryView":1195 + * if order == 'F': + * for idx in range(ndim): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * else: + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1196 + * for idx in range(ndim): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * else: + * for idx in range(ndim - 1, -1, -1): + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + + /* "View.MemoryView":1193 + * cdef int idx + * + * if order == 'F': # <<<<<<<<<<<<<< + * for idx in range(ndim): + * strides[idx] = stride + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1198 + * stride *= shape[idx] + * else: + * for idx in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * strides[idx] = stride + * stride *= shape[idx] + */ + /*else*/ { + for (__pyx_t_2 = (__pyx_v_ndim - 1); __pyx_t_2 > -1; __pyx_t_2-=1) { + __pyx_v_idx = __pyx_t_2; + + /* "View.MemoryView":1199 + * else: + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride # <<<<<<<<<<<<<< + * stride *= shape[idx] + * + */ + (__pyx_v_strides[__pyx_v_idx]) = __pyx_v_stride; + + /* "View.MemoryView":1200 + * for idx in range(ndim - 1, -1, -1): + * strides[idx] = stride + * stride *= shape[idx] # <<<<<<<<<<<<<< + * + * return stride + */ + __pyx_v_stride = (__pyx_v_stride * (__pyx_v_shape[__pyx_v_idx])); + } + } + __pyx_L3:; + + /* "View.MemoryView":1202 + * stride *= shape[idx] + * + * return stride # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_copy_data_to_temp') + */ + __pyx_r = __pyx_v_stride; + goto __pyx_L0; + + /* "View.MemoryView":1184 + * + * @cname('__pyx_fill_contig_strides_array') + * cdef Py_ssize_t fill_contig_strides_array( # <<<<<<<<<<<<<< + * Py_ssize_t *shape, Py_ssize_t *strides, Py_ssize_t stride, + * int ndim, char order) noexcept nogil: + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "View.MemoryView":1205 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + +static void *__pyx_memoryview_copy_data_to_temp(__Pyx_memviewslice *__pyx_v_src, __Pyx_memviewslice *__pyx_v_tmpslice, char __pyx_v_order, int __pyx_v_ndim) { + int __pyx_v_i; + void *__pyx_v_result; + size_t __pyx_v_itemsize; + size_t __pyx_v_size; + void *__pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + struct __pyx_memoryview_obj *__pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE 
__pyx_gilstate_save; + #endif + __Pyx_RefNannySetupContext("copy_data_to_temp", 1); + + /* "View.MemoryView":1216 + * cdef void *result + * + * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef size_t size = slice_get_size(src, ndim) + * + */ + __pyx_t_1 = __pyx_v_src->memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1217 + * + * cdef size_t itemsize = src.memview.view.itemsize + * cdef size_t size = slice_get_size(src, ndim) # <<<<<<<<<<<<<< + * + * result = malloc(size) + */ + __pyx_v_size = __pyx_memoryview_slice_get_size(__pyx_v_src, __pyx_v_ndim); + + /* "View.MemoryView":1219 + * cdef size_t size = slice_get_size(src, ndim) + * + * result = malloc(size) # <<<<<<<<<<<<<< + * if not result: + * _err_no_memory() + */ + __pyx_v_result = malloc(__pyx_v_size); + + /* "View.MemoryView":1220 + * + * result = malloc(size) + * if not result: # <<<<<<<<<<<<<< + * _err_no_memory() + * + */ + __pyx_t_2 = (!(__pyx_v_result != 0)); + if (__pyx_t_2) { + + /* "View.MemoryView":1221 + * result = malloc(size) + * if not result: + * _err_no_memory() # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_3 = __pyx_memoryview_err_no_memory(); if (unlikely(__pyx_t_3 == ((int)-1))) __PYX_ERR(1, 1221, __pyx_L1_error) + + /* "View.MemoryView":1220 + * + * result = malloc(size) + * if not result: # <<<<<<<<<<<<<< + * _err_no_memory() + * + */ + } + + /* "View.MemoryView":1224 + * + * + * tmpslice.data = result # <<<<<<<<<<<<<< + * tmpslice.memview = src.memview + * for i in range(ndim): + */ + __pyx_v_tmpslice->data = ((char *)__pyx_v_result); + + /* "View.MemoryView":1225 + * + * tmpslice.data = result + * tmpslice.memview = src.memview # <<<<<<<<<<<<<< + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] + */ + __pyx_t_4 = __pyx_v_src->memview; + __pyx_v_tmpslice->memview = __pyx_t_4; + + /* "View.MemoryView":1226 + * tmpslice.data = result + * tmpslice.memview = src.memview + * for i in range(ndim): # <<<<<<<<<<<<<< + * tmpslice.shape[i] = src.shape[i] + * tmpslice.suboffsets[i] = -1 + */ + __pyx_t_3 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_3; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1227 + * tmpslice.memview = src.memview + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] # <<<<<<<<<<<<<< + * tmpslice.suboffsets[i] = -1 + * + */ + (__pyx_v_tmpslice->shape[__pyx_v_i]) = (__pyx_v_src->shape[__pyx_v_i]); + + /* "View.MemoryView":1228 + * for i in range(ndim): + * tmpslice.shape[i] = src.shape[i] + * tmpslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order) + */ + (__pyx_v_tmpslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1230 + * tmpslice.suboffsets[i] = -1 + * + * fill_contig_strides_array(&tmpslice.shape[0], &tmpslice.strides[0], itemsize, ndim, order) # <<<<<<<<<<<<<< + * + * + */ + (void)(__pyx_fill_contig_strides_array((&(__pyx_v_tmpslice->shape[0])), (&(__pyx_v_tmpslice->strides[0])), __pyx_v_itemsize, __pyx_v_ndim, __pyx_v_order)); + + /* "View.MemoryView":1233 + * + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 + */ + __pyx_t_3 = __pyx_v_ndim; + __pyx_t_5 = __pyx_t_3; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_i = __pyx_t_6; + + /* "View.MemoryView":1234 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + __pyx_t_2 = 
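/* dimensions of extent 1 are given stride 0; an extent-1 axis is only ever indexed at 0, so zeroing its stride cannot change any address computation */ +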
((__pyx_v_tmpslice->shape[__pyx_v_i]) == 1); + if (__pyx_t_2) { + + /* "View.MemoryView":1235 + * for i in range(ndim): + * if tmpslice.shape[i] == 1: + * tmpslice.strides[i] = 0 # <<<<<<<<<<<<<< + * + * if slice_is_contig(src[0], order, ndim): + */ + (__pyx_v_tmpslice->strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1234 + * + * for i in range(ndim): + * if tmpslice.shape[i] == 1: # <<<<<<<<<<<<<< + * tmpslice.strides[i] = 0 + * + */ + } + } + + /* "View.MemoryView":1237 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + __pyx_t_2 = __pyx_memviewslice_is_contig((__pyx_v_src[0]), __pyx_v_order, __pyx_v_ndim); + if (__pyx_t_2) { + + /* "View.MemoryView":1238 + * + * if slice_is_contig(src[0], order, ndim): + * memcpy(result, src.data, size) # <<<<<<<<<<<<<< + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + */ + (void)(memcpy(__pyx_v_result, __pyx_v_src->data, __pyx_v_size)); + + /* "View.MemoryView":1237 + * tmpslice.strides[i] = 0 + * + * if slice_is_contig(src[0], order, ndim): # <<<<<<<<<<<<<< + * memcpy(result, src.data, size) + * else: + */ + goto __pyx_L9; + } + + /* "View.MemoryView":1240 + * memcpy(result, src.data, size) + * else: + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) # <<<<<<<<<<<<<< + * + * return result + */ + /*else*/ { + copy_strided_to_strided(__pyx_v_src, __pyx_v_tmpslice, __pyx_v_ndim, __pyx_v_itemsize); + } + __pyx_L9:; + + /* "View.MemoryView":1242 + * copy_strided_to_strided(src, tmpslice, ndim, itemsize) + * + * return result # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = __pyx_v_result; + goto __pyx_L0; + + /* "View.MemoryView":1205 + * + * @cname('__pyx_memoryview_copy_data_to_temp') + * cdef void *copy_data_to_temp(__Pyx_memviewslice *src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice *tmpslice, + * char order, + */ + + /* function exit code */ + __pyx_L1_error:; + #ifdef WITH_THREAD + __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.copy_data_to_temp", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __pyx_L0:; + __Pyx_RefNannyFinishContextNogil() + return __pyx_r; +} + +/* "View.MemoryView":1247 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})" + */ + +static int __pyx_memoryview_err_extents(int __pyx_v_i, Py_ssize_t __pyx_v_extent1, Py_ssize_t __pyx_v_extent2) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + Py_UCS4 __pyx_t_3; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_extents", 0); + + /* "View.MemoryView":1249 + * cdef int _err_extents(int i, Py_ssize_t extent1, + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})" # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err_dim') + */ + __pyx_t_1 = PyTuple_New(7); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = 0; + __pyx_t_3 = 127; + 
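/* Assemble the message by joining the literal fragments with the formatted values of i, extent1 and extent2; __pyx_t_2 accumulates the total length and __pyx_t_3 the maximum character value passed to __Pyx_PyUnicode_Join. */ +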
__Pyx_INCREF(__pyx_kp_u_got_differing_extents_in_dimensi); + __pyx_t_2 += 35; + __Pyx_GIVEREF(__pyx_kp_u_got_differing_extents_in_dimensi); + PyTuple_SET_ITEM(__pyx_t_1, 0, __pyx_kp_u_got_differing_extents_in_dimensi); + __pyx_t_4 = __Pyx_PyUnicode_From_int(__pyx_v_i, 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_1, 1, __pyx_t_4); + __pyx_t_4 = 0; + __Pyx_INCREF(__pyx_kp_u_got); + __pyx_t_2 += 6; + __Pyx_GIVEREF(__pyx_kp_u_got); + PyTuple_SET_ITEM(__pyx_t_1, 2, __pyx_kp_u_got); + __pyx_t_4 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_extent1, 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_1, 3, __pyx_t_4); + __pyx_t_4 = 0; + __Pyx_INCREF(__pyx_kp_u_and); + __pyx_t_2 += 5; + __Pyx_GIVEREF(__pyx_kp_u_and); + PyTuple_SET_ITEM(__pyx_t_1, 4, __pyx_kp_u_and); + __pyx_t_4 = __Pyx_PyUnicode_From_Py_ssize_t(__pyx_v_extent2, 0, ' ', 'd'); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_2 += __Pyx_PyUnicode_GET_LENGTH(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_4); + PyTuple_SET_ITEM(__pyx_t_1, 5, __pyx_t_4); + __pyx_t_4 = 0; + __Pyx_INCREF(__pyx_kp_u__7); + __pyx_t_2 += 1; + __Pyx_GIVEREF(__pyx_kp_u__7); + PyTuple_SET_ITEM(__pyx_t_1, 6, __pyx_kp_u__7); + __pyx_t_4 = __Pyx_PyUnicode_Join(__pyx_t_1, 7, __pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 1249, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_Raise(__pyx_builtin_ValueError, __pyx_t_4, 0, 0); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __PYX_ERR(1, 1249, __pyx_L1_error) + + /* "View.MemoryView":1247 + * + * @cname('__pyx_memoryview_err_extents') + * cdef int _err_extents(int i, Py_ssize_t extent1, # <<<<<<<<<<<<<< + * Py_ssize_t extent2) except -1 with gil: + * raise ValueError, f"got differing extents in dimension {i} (got {extent1} and {extent2})" + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView._err_extents", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1252 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error, msg % dim + * + */ + +static int __pyx_memoryview_err_dim(PyObject *__pyx_v_error, PyObject *__pyx_v_msg, int __pyx_v_dim) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_dim", 0); + __Pyx_INCREF(__pyx_v_msg); + + /* "View.MemoryView":1253 + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil: + * raise error, msg % dim # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err') + */ + __pyx_t_1 = __Pyx_PyInt_From_int(__pyx_v_dim); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + 
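/* interpolate the offending dimension into msg (msg % dim) and raise the supplied exception type with the result */ +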
__pyx_t_2 = __Pyx_PyString_FormatSafe(__pyx_v_msg, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(1, 1253, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_Raise(((PyObject *)__pyx_v_error), __pyx_t_2, 0, 0); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __PYX_ERR(1, 1253, __pyx_L1_error) + + /* "View.MemoryView":1252 + * + * @cname('__pyx_memoryview_err_dim') + * cdef int _err_dim(PyObject *error, str msg, int dim) except -1 with gil: # <<<<<<<<<<<<<< + * raise error, msg % dim + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_AddTraceback("View.MemoryView._err_dim", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_msg); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1256 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(PyObject *error, str msg) except -1 with gil: # <<<<<<<<<<<<<< + * raise error, msg + * + */ + +static int __pyx_memoryview_err(PyObject *__pyx_v_error, PyObject *__pyx_v_msg) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err", 0); + __Pyx_INCREF(__pyx_v_msg); + + /* "View.MemoryView":1257 + * @cname('__pyx_memoryview_err') + * cdef int _err(PyObject *error, str msg) except -1 with gil: + * raise error, msg # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_err_no_memory') + */ + __Pyx_Raise(((PyObject *)__pyx_v_error), __pyx_v_msg, 0, 0); + __PYX_ERR(1, 1257, __pyx_L1_error) + + /* "View.MemoryView":1256 + * + * @cname('__pyx_memoryview_err') + * cdef int _err(PyObject *error, str msg) except -1 with gil: # <<<<<<<<<<<<<< + * raise error, msg + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView._err", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_XDECREF(__pyx_v_msg); + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1260 + * + * @cname('__pyx_memoryview_err_no_memory') + * cdef int _err_no_memory() except -1 with gil: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + +static int __pyx_memoryview_err_no_memory(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("_err_no_memory", 0); + + /* "View.MemoryView":1261 + * @cname('__pyx_memoryview_err_no_memory') + * cdef int _err_no_memory() except -1 with gil: + * raise MemoryError # <<<<<<<<<<<<<< + * + * + */ + PyErr_NoMemory(); __PYX_ERR(1, 1261, __pyx_L1_error) + + /* "View.MemoryView":1260 + * + * @cname('__pyx_memoryview_err_no_memory') + * cdef int _err_no_memory() except -1 with gil: # <<<<<<<<<<<<<< + * raise MemoryError + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("View.MemoryView._err_no_memory", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + return __pyx_r; +} + +/* "View.MemoryView":1265 + * + * 
@cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + +static int __pyx_memoryview_copy_contents(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, int __pyx_v_src_ndim, int __pyx_v_dst_ndim, int __pyx_v_dtype_is_object) { + void *__pyx_v_tmpdata; + size_t __pyx_v_itemsize; + int __pyx_v_i; + char __pyx_v_order; + int __pyx_v_broadcasting; + int __pyx_v_direct_copy; + __Pyx_memviewslice __pyx_v_tmp; + int __pyx_v_ndim; + int __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + int __pyx_t_4; + int __pyx_t_5; + int __pyx_t_6; + void *__pyx_t_7; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save; + #endif + __Pyx_RefNannySetupContext("memoryview_copy_contents", 1); + + /* "View.MemoryView":1273 + * Check for overlapping memory and verify the shapes. + * """ + * cdef void *tmpdata = NULL # <<<<<<<<<<<<<< + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + */ + __pyx_v_tmpdata = NULL; + + /* "View.MemoryView":1274 + * """ + * cdef void *tmpdata = NULL + * cdef size_t itemsize = src.memview.view.itemsize # <<<<<<<<<<<<<< + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + */ + __pyx_t_1 = __pyx_v_src.memview->view.itemsize; + __pyx_v_itemsize = __pyx_t_1; + + /* "View.MemoryView":1276 + * cdef size_t itemsize = src.memview.view.itemsize + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) # <<<<<<<<<<<<<< + * cdef bint broadcasting = False + * cdef bint direct_copy = False + */ + __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_src), __pyx_v_src_ndim); + + /* "View.MemoryView":1277 + * cdef int i + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False # <<<<<<<<<<<<<< + * cdef bint direct_copy = False + * cdef __Pyx_memviewslice tmp + */ + __pyx_v_broadcasting = 0; + + /* "View.MemoryView":1278 + * cdef char order = get_best_order(&src, src_ndim) + * cdef bint broadcasting = False + * cdef bint direct_copy = False # <<<<<<<<<<<<<< + * cdef __Pyx_memviewslice tmp + * + */ + __pyx_v_direct_copy = 0; + + /* "View.MemoryView":1281 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + __pyx_t_2 = (__pyx_v_src_ndim < __pyx_v_dst_ndim); + if (__pyx_t_2) { + + /* "View.MemoryView":1282 + * + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_src), __pyx_v_src_ndim, __pyx_v_dst_ndim); + + /* "View.MemoryView":1281 + * cdef __Pyx_memviewslice tmp + * + * if src_ndim < dst_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1283 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + __pyx_t_2 = (__pyx_v_dst_ndim < __pyx_v_src_ndim); + if (__pyx_t_2) { + + /* "View.MemoryView":1284 + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: + * broadcast_leading(&dst, dst_ndim, src_ndim) # <<<<<<<<<<<<<< + * + * cdef int 
ndim = max(src_ndim, dst_ndim) + */ + __pyx_memoryview_broadcast_leading((&__pyx_v_dst), __pyx_v_dst_ndim, __pyx_v_src_ndim); + + /* "View.MemoryView":1283 + * if src_ndim < dst_ndim: + * broadcast_leading(&src, src_ndim, dst_ndim) + * elif dst_ndim < src_ndim: # <<<<<<<<<<<<<< + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + */ + } + __pyx_L3:; + + /* "View.MemoryView":1286 + * broadcast_leading(&dst, dst_ndim, src_ndim) + * + * cdef int ndim = max(src_ndim, dst_ndim) # <<<<<<<<<<<<<< + * + * for i in range(ndim): + */ + __pyx_t_3 = __pyx_v_dst_ndim; + __pyx_t_4 = __pyx_v_src_ndim; + __pyx_t_2 = (__pyx_t_3 > __pyx_t_4); + if (__pyx_t_2) { + __pyx_t_5 = __pyx_t_3; + } else { + __pyx_t_5 = __pyx_t_4; + } + __pyx_v_ndim = __pyx_t_5; + + /* "View.MemoryView":1288 + * cdef int ndim = max(src_ndim, dst_ndim) + * + * for i in range(ndim): # <<<<<<<<<<<<<< + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + */ + __pyx_t_5 = __pyx_v_ndim; + __pyx_t_3 = __pyx_t_5; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1289 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + __pyx_t_2 = ((__pyx_v_src.shape[__pyx_v_i]) != (__pyx_v_dst.shape[__pyx_v_i])); + if (__pyx_t_2) { + + /* "View.MemoryView":1290 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + __pyx_t_2 = ((__pyx_v_src.shape[__pyx_v_i]) == 1); + if (__pyx_t_2) { + + /* "View.MemoryView":1291 + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: + * broadcasting = True # <<<<<<<<<<<<<< + * src.strides[i] = 0 + * else: + */ + __pyx_v_broadcasting = 1; + + /* "View.MemoryView":1292 + * if src.shape[i] == 1: + * broadcasting = True + * src.strides[i] = 0 # <<<<<<<<<<<<<< + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) + */ + (__pyx_v_src.strides[__pyx_v_i]) = 0; + + /* "View.MemoryView":1290 + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: + * if src.shape[i] == 1: # <<<<<<<<<<<<<< + * broadcasting = True + * src.strides[i] = 0 + */ + goto __pyx_L7; + } + + /* "View.MemoryView":1294 + * src.strides[i] = 0 + * else: + * _err_extents(i, dst.shape[i], src.shape[i]) # <<<<<<<<<<<<<< + * + * if src.suboffsets[i] >= 0: + */ + /*else*/ { + __pyx_t_6 = __pyx_memoryview_err_extents(__pyx_v_i, (__pyx_v_dst.shape[__pyx_v_i]), (__pyx_v_src.shape[__pyx_v_i])); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1294, __pyx_L1_error) + } + __pyx_L7:; + + /* "View.MemoryView":1289 + * + * for i in range(ndim): + * if src.shape[i] != dst.shape[i]: # <<<<<<<<<<<<<< + * if src.shape[i] == 1: + * broadcasting = True + */ + } + + /* "View.MemoryView":1296 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) + * + */ + __pyx_t_2 = ((__pyx_v_src.suboffsets[__pyx_v_i]) >= 0); + if (__pyx_t_2) { + + /* "View.MemoryView":1297 + * + * if src.suboffsets[i] >= 0: + * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) # <<<<<<<<<<<<<< + * + * if slices_overlap(&src, &dst, ndim, itemsize): + */ + __pyx_t_6 = __pyx_memoryview_err_dim(PyExc_ValueError, __pyx_kp_s_Dimension_d_is_not_direct, __pyx_v_i); if (unlikely(__pyx_t_6 == ((int)-1))) __PYX_ERR(1, 1297, __pyx_L1_error) + + /* "View.MemoryView":1296 + * _err_extents(i, dst.shape[i], src.shape[i]) + * + * if 
src.suboffsets[i] >= 0: # <<<<<<<<<<<<<< + * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) + * + */ + } + } + + /* "View.MemoryView":1299 + * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + __pyx_t_2 = __pyx_slices_overlap((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); + if (__pyx_t_2) { + + /* "View.MemoryView":1301 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + __pyx_t_2 = (!__pyx_memviewslice_is_contig(__pyx_v_src, __pyx_v_order, __pyx_v_ndim)); + if (__pyx_t_2) { + + /* "View.MemoryView":1302 + * + * if not slice_is_contig(src, order, ndim): + * order = get_best_order(&dst, ndim) # <<<<<<<<<<<<<< + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + */ + __pyx_v_order = __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim); + + /* "View.MemoryView":1301 + * if slices_overlap(&src, &dst, ndim, itemsize): + * + * if not slice_is_contig(src, order, ndim): # <<<<<<<<<<<<<< + * order = get_best_order(&dst, ndim) + * + */ + } + + /* "View.MemoryView":1304 + * order = get_best_order(&dst, ndim) + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) # <<<<<<<<<<<<<< + * src = tmp + * + */ + __pyx_t_7 = __pyx_memoryview_copy_data_to_temp((&__pyx_v_src), (&__pyx_v_tmp), __pyx_v_order, __pyx_v_ndim); if (unlikely(__pyx_t_7 == ((void *)NULL))) __PYX_ERR(1, 1304, __pyx_L1_error) + __pyx_v_tmpdata = __pyx_t_7; + + /* "View.MemoryView":1305 + * + * tmpdata = copy_data_to_temp(&src, &tmp, order, ndim) + * src = tmp # <<<<<<<<<<<<<< + * + * if not broadcasting: + */ + __pyx_v_src = __pyx_v_tmp; + + /* "View.MemoryView":1299 + * _err_dim(PyExc_ValueError, "Dimension %d is not direct", i) + * + * if slices_overlap(&src, &dst, ndim, itemsize): # <<<<<<<<<<<<<< + * + * if not slice_is_contig(src, order, ndim): + */ + } + + /* "View.MemoryView":1307 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = (!__pyx_v_broadcasting); + if (__pyx_t_2) { + + /* "View.MemoryView":1310 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + __pyx_t_2 = __pyx_memviewslice_is_contig(__pyx_v_src, 'C', __pyx_v_ndim); + if (__pyx_t_2) { + + /* "View.MemoryView":1311 + * + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) # <<<<<<<<<<<<<< + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'C', __pyx_v_ndim); + + /* "View.MemoryView":1310 + * + * + * if slice_is_contig(src, 'C', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + */ + goto __pyx_L12; + } + + /* "View.MemoryView":1312 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + __pyx_t_2 = __pyx_memviewslice_is_contig(__pyx_v_src, 'F', __pyx_v_ndim); + if (__pyx_t_2) { + + /* "View.MemoryView":1313 + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): + * direct_copy = slice_is_contig(dst, 'F', ndim) # <<<<<<<<<<<<<< + 
* + * if direct_copy: + */ + __pyx_v_direct_copy = __pyx_memviewslice_is_contig(__pyx_v_dst, 'F', __pyx_v_ndim); + + /* "View.MemoryView":1312 + * if slice_is_contig(src, 'C', ndim): + * direct_copy = slice_is_contig(dst, 'C', ndim) + * elif slice_is_contig(src, 'F', ndim): # <<<<<<<<<<<<<< + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + */ + } + __pyx_L12:; + + /* "View.MemoryView":1315 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) + */ + if (__pyx_v_direct_copy) { + + /* "View.MemoryView":1317 + * if direct_copy: + * + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) # <<<<<<<<<<<<<< + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1318 + * + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) + * free(tmpdata) + */ + (void)(memcpy(__pyx_v_dst.data, __pyx_v_src.data, __pyx_memoryview_slice_get_size((&__pyx_v_src), __pyx_v_ndim))); + + /* "View.MemoryView":1319 + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) # <<<<<<<<<<<<<< + * free(tmpdata) + * return 0 + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1320 + * memcpy(dst.data, src.data, slice_get_size(&src, ndim)) + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1321 + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * if order == 'F' == get_best_order(&dst, ndim): + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1315 + * direct_copy = slice_is_contig(dst, 'F', ndim) + * + * if direct_copy: # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) + */ + } + + /* "View.MemoryView":1307 + * src = tmp + * + * if not broadcasting: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1323 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_2 = (__pyx_v_order == 'F'); + if (__pyx_t_2) { + __pyx_t_2 = ('F' == __pyx_get_best_slice_order((&__pyx_v_dst), __pyx_v_ndim)); + } + if (__pyx_t_2) { + + /* "View.MemoryView":1326 + * + * + * transpose_memslice(&src) # <<<<<<<<<<<<<< + * transpose_memslice(&dst) + * + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_src)); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 1326, __pyx_L1_error) + + /* "View.MemoryView":1327 + * + * transpose_memslice(&src) + * transpose_memslice(&dst) # <<<<<<<<<<<<<< + * + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) + */ + __pyx_t_5 = __pyx_memslice_transpose((&__pyx_v_dst)); if (unlikely(__pyx_t_5 == ((int)-1))) __PYX_ERR(1, 1327, __pyx_L1_error) + + /* "View.MemoryView":1323 + * return 0 + * + * if order == 'F' == get_best_order(&dst, ndim): # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":1329 + * transpose_memslice(&dst) + * + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) # <<<<<<<<<<<<<< + 
* copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1330 + * + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) # <<<<<<<<<<<<<< + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) + * + */ + copy_strided_to_strided((&__pyx_v_src), (&__pyx_v_dst), __pyx_v_ndim, __pyx_v_itemsize); + + /* "View.MemoryView":1331 + * refcount_copying(&dst, dtype_is_object, ndim, inc=False) + * copy_strided_to_strided(&src, &dst, ndim, itemsize) + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) # <<<<<<<<<<<<<< + * + * free(tmpdata) + */ + __pyx_memoryview_refcount_copying((&__pyx_v_dst), __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1333 + * refcount_copying(&dst, dtype_is_object, ndim, inc=True) + * + * free(tmpdata) # <<<<<<<<<<<<<< + * return 0 + * + */ + free(__pyx_v_tmpdata); + + /* "View.MemoryView":1334 + * + * free(tmpdata) + * return 0 # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_broadcast_leading') + */ + __pyx_r = 0; + goto __pyx_L0; + + /* "View.MemoryView":1265 + * + * @cname('__pyx_memoryview_copy_contents') + * cdef int memoryview_copy_contents(__Pyx_memviewslice src, # <<<<<<<<<<<<<< + * __Pyx_memviewslice dst, + * int src_ndim, int dst_ndim, + */ + + /* function exit code */ + __pyx_L1_error:; + #ifdef WITH_THREAD + __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_AddTraceback("View.MemoryView.memoryview_copy_contents", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif + __pyx_L0:; + __Pyx_RefNannyFinishContextNogil() + return __pyx_r; +} + +/* "View.MemoryView":1337 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) noexcept nogil: + */ + +static void __pyx_memoryview_broadcast_leading(__Pyx_memviewslice *__pyx_v_mslice, int __pyx_v_ndim, int __pyx_v_ndim_other) { + int __pyx_v_i; + int __pyx_v_offset; + int __pyx_t_1; + int __pyx_t_2; + int __pyx_t_3; + + /* "View.MemoryView":1341 + * int ndim_other) noexcept nogil: + * cdef int i + * cdef int offset = ndim_other - ndim # <<<<<<<<<<<<<< + * + * for i in range(ndim - 1, -1, -1): + */ + __pyx_v_offset = (__pyx_v_ndim_other - __pyx_v_ndim); + + /* "View.MemoryView":1343 + * cdef int offset = ndim_other - ndim + * + * for i in range(ndim - 1, -1, -1): # <<<<<<<<<<<<<< + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + */ + for (__pyx_t_1 = (__pyx_v_ndim - 1); __pyx_t_1 > -1; __pyx_t_1-=1) { + __pyx_v_i = __pyx_t_1; + + /* "View.MemoryView":1344 + * + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] # <<<<<<<<<<<<<< + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + */ + (__pyx_v_mslice->shape[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->shape[__pyx_v_i]); + + /* "View.MemoryView":1345 + * for i in range(ndim - 1, -1, -1): + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] # <<<<<<<<<<<<<< + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + */ + (__pyx_v_mslice->strides[(__pyx_v_i + __pyx_v_offset)]) = 
(__pyx_v_mslice->strides[__pyx_v_i]); + + /* "View.MemoryView":1346 + * mslice.shape[i + offset] = mslice.shape[i] + * mslice.strides[i + offset] = mslice.strides[i] + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] # <<<<<<<<<<<<<< + * + * for i in range(offset): + */ + (__pyx_v_mslice->suboffsets[(__pyx_v_i + __pyx_v_offset)]) = (__pyx_v_mslice->suboffsets[__pyx_v_i]); + } + + /* "View.MemoryView":1348 + * mslice.suboffsets[i + offset] = mslice.suboffsets[i] + * + * for i in range(offset): # <<<<<<<<<<<<<< + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + */ + __pyx_t_1 = __pyx_v_offset; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1349 + * + * for i in range(offset): + * mslice.shape[i] = 1 # <<<<<<<<<<<<<< + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 + */ + (__pyx_v_mslice->shape[__pyx_v_i]) = 1; + + /* "View.MemoryView":1350 + * for i in range(offset): + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] # <<<<<<<<<<<<<< + * mslice.suboffsets[i] = -1 + * + */ + (__pyx_v_mslice->strides[__pyx_v_i]) = (__pyx_v_mslice->strides[0]); + + /* "View.MemoryView":1351 + * mslice.shape[i] = 1 + * mslice.strides[i] = mslice.strides[0] + * mslice.suboffsets[i] = -1 # <<<<<<<<<<<<<< + * + * + */ + (__pyx_v_mslice->suboffsets[__pyx_v_i]) = -1L; + } + + /* "View.MemoryView":1337 + * + * @cname('__pyx_memoryview_broadcast_leading') + * cdef void broadcast_leading(__Pyx_memviewslice *mslice, # <<<<<<<<<<<<<< + * int ndim, + * int ndim_other) noexcept nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1359 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: # <<<<<<<<<<<<<< + * + * if dtype_is_object: + */ + +static void __pyx_memoryview_refcount_copying(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_dtype_is_object, int __pyx_v_ndim, int __pyx_v_inc) { + + /* "View.MemoryView":1361 + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc) + * + */ + if (__pyx_v_dtype_is_object) { + + /* "View.MemoryView":1362 + * + * if dtype_is_object: + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + */ + __pyx_memoryview_refcount_objects_in_slice_with_gil(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1361 + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: + * + * if dtype_is_object: # <<<<<<<<<<<<<< + * refcount_objects_in_slice_with_gil(dst.data, dst.shape, dst.strides, ndim, inc) + * + */ + } + + /* "View.MemoryView":1359 + * + * @cname('__pyx_memoryview_refcount_copying') + * cdef void refcount_copying(__Pyx_memviewslice *dst, bint dtype_is_object, int ndim, bint inc) noexcept nogil: # <<<<<<<<<<<<<< + * + * if dtype_is_object: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1365 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint 
inc) noexcept with gil: + */ + +static void __pyx_memoryview_refcount_objects_in_slice_with_gil(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + __Pyx_RefNannyDeclarations + #ifdef WITH_THREAD + PyGILState_STATE __pyx_gilstate_save = __Pyx_PyGILState_Ensure(); + #endif + __Pyx_RefNannySetupContext("refcount_objects_in_slice_with_gil", 0); + + /* "View.MemoryView":1368 + * Py_ssize_t *strides, int ndim, + * bint inc) noexcept with gil: + * refcount_objects_in_slice(data, shape, strides, ndim, inc) # <<<<<<<<<<<<<< + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + */ + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, __pyx_v_shape, __pyx_v_strides, __pyx_v_ndim, __pyx_v_inc); + + /* "View.MemoryView":1365 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice_with_gil') + * cdef void refcount_objects_in_slice_with_gil(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * bint inc) noexcept with gil: + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); + #ifdef WITH_THREAD + __Pyx_PyGILState_Release(__pyx_gilstate_save); + #endif +} + +/* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, bint inc) noexcept: + * cdef Py_ssize_t i + */ + +static void __pyx_memoryview_refcount_objects_in_slice(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, int __pyx_v_inc) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_stride; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + __Pyx_RefNannySetupContext("refcount_objects_in_slice", 0); + + /* "View.MemoryView":1374 + * Py_ssize_t *strides, int ndim, bint inc) noexcept: + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< + * + * for i in range(shape[0]): + */ + __pyx_v_stride = (__pyx_v_strides[0]); + + /* "View.MemoryView":1376 + * cdef Py_ssize_t stride = strides[0] + * + * for i in range(shape[0]): # <<<<<<<<<<<<<< + * if ndim == 1: + * if inc: + */ + __pyx_t_1 = (__pyx_v_shape[0]); + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "View.MemoryView":1377 + * + * for i in range(shape[0]): + * if ndim == 1: # <<<<<<<<<<<<<< + * if inc: + * Py_INCREF(( data)[0]) + */ + __pyx_t_4 = (__pyx_v_ndim == 1); + if (__pyx_t_4) { + + /* "View.MemoryView":1378 + * for i in range(shape[0]): + * if ndim == 1: + * if inc: # <<<<<<<<<<<<<< + * Py_INCREF(( data)[0]) + * else: + */ + if (__pyx_v_inc) { + + /* "View.MemoryView":1379 + * if ndim == 1: + * if inc: + * Py_INCREF(( data)[0]) # <<<<<<<<<<<<<< + * else: + * Py_DECREF(( data)[0]) + */ + Py_INCREF((((PyObject **)__pyx_v_data)[0])); + + /* "View.MemoryView":1378 + * for i in range(shape[0]): + * if ndim == 1: + * if inc: # <<<<<<<<<<<<<< + * Py_INCREF(( data)[0]) + * else: + */ + goto __pyx_L6; + } + + /* "View.MemoryView":1381 + * Py_INCREF(( data)[0]) + * else: + * Py_DECREF(( data)[0]) # <<<<<<<<<<<<<< + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, ndim - 1, inc) + */ + /*else*/ { + Py_DECREF((((PyObject **)__pyx_v_data)[0])); + } + __pyx_L6:; + + /* "View.MemoryView":1377 + * + * for i in range(shape[0]): + * if ndim == 1: # <<<<<<<<<<<<<< + * if inc: + * Py_INCREF(( data)[0]) + */ + 
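/* Innermost dimension: this element's refcount has been adjusted, so skip the recursive branch below. */ +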
goto __pyx_L5; + } + + /* "View.MemoryView":1383 + * Py_DECREF(( data)[0]) + * else: + * refcount_objects_in_slice(data, shape + 1, strides + 1, ndim - 1, inc) # <<<<<<<<<<<<<< + * + * data += stride + */ + /*else*/ { + __pyx_memoryview_refcount_objects_in_slice(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_inc); + } + __pyx_L5:; + + /* "View.MemoryView":1385 + * refcount_objects_in_slice(data, shape + 1, strides + 1, ndim - 1, inc) + * + * data += stride # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + + /* "View.MemoryView":1371 + * + * @cname('__pyx_memoryview_refcount_objects_in_slice') + * cdef void refcount_objects_in_slice(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, bint inc) noexcept: + * cdef Py_ssize_t i + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "View.MemoryView":1391 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) noexcept nogil: + */ + +static void __pyx_memoryview_slice_assign_scalar(__Pyx_memviewslice *__pyx_v_dst, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item, int __pyx_v_dtype_is_object) { + + /* "View.MemoryView":1394 + * size_t itemsize, void *item, + * bint dtype_is_object) noexcept nogil: + * refcount_copying(dst, dtype_is_object, ndim, inc=False) # <<<<<<<<<<<<<< + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, inc=True) + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 0); + + /* "View.MemoryView":1395 + * bint dtype_is_object) noexcept nogil: + * refcount_copying(dst, dtype_is_object, ndim, inc=False) + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, itemsize, item) # <<<<<<<<<<<<<< + * refcount_copying(dst, dtype_is_object, ndim, inc=True) + * + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_dst->data, __pyx_v_dst->shape, __pyx_v_dst->strides, __pyx_v_ndim, __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1396 + * refcount_copying(dst, dtype_is_object, ndim, inc=False) + * _slice_assign_scalar(dst.data, dst.shape, dst.strides, ndim, itemsize, item) + * refcount_copying(dst, dtype_is_object, ndim, inc=True) # <<<<<<<<<<<<<< + * + * + */ + __pyx_memoryview_refcount_copying(__pyx_v_dst, __pyx_v_dtype_is_object, __pyx_v_ndim, 1); + + /* "View.MemoryView":1391 + * + * @cname('__pyx_memoryview_slice_assign_scalar') + * cdef void slice_assign_scalar(__Pyx_memviewslice *dst, int ndim, # <<<<<<<<<<<<<< + * size_t itemsize, void *item, + * bint dtype_is_object) noexcept nogil: + */ + + /* function exit code */ +} + +/* "View.MemoryView":1400 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) noexcept nogil: + */ + +static void __pyx_memoryview__slice_assign_scalar(char *__pyx_v_data, Py_ssize_t *__pyx_v_shape, Py_ssize_t *__pyx_v_strides, int __pyx_v_ndim, size_t __pyx_v_itemsize, void *__pyx_v_item) { + CYTHON_UNUSED Py_ssize_t __pyx_v_i; + Py_ssize_t __pyx_v_stride; + Py_ssize_t __pyx_v_extent; + int __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + + /* "View.MemoryView":1404 + * size_t itemsize, void *item) noexcept nogil: 
+ * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t extent = shape[0] + * + */ + __pyx_v_stride = (__pyx_v_strides[0]); + + /* "View.MemoryView":1405 + * cdef Py_ssize_t i + * cdef Py_ssize_t stride = strides[0] + * cdef Py_ssize_t extent = shape[0] # <<<<<<<<<<<<<< + * + * if ndim == 1: + */ + __pyx_v_extent = (__pyx_v_shape[0]); + + /* "View.MemoryView":1407 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + __pyx_t_1 = (__pyx_v_ndim == 1); + if (__pyx_t_1) { + + /* "View.MemoryView":1408 + * + * if ndim == 1: + * for i in range(extent): # <<<<<<<<<<<<<< + * memcpy(data, item, itemsize) + * data += stride + */ + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1409 + * if ndim == 1: + * for i in range(extent): + * memcpy(data, item, itemsize) # <<<<<<<<<<<<<< + * data += stride + * else: + */ + (void)(memcpy(__pyx_v_data, __pyx_v_item, __pyx_v_itemsize)); + + /* "View.MemoryView":1410 + * for i in range(extent): + * memcpy(data, item, itemsize) + * data += stride # <<<<<<<<<<<<<< + * else: + * for i in range(extent): + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + + /* "View.MemoryView":1407 + * cdef Py_ssize_t extent = shape[0] + * + * if ndim == 1: # <<<<<<<<<<<<<< + * for i in range(extent): + * memcpy(data, item, itemsize) + */ + goto __pyx_L3; + } + + /* "View.MemoryView":1412 + * data += stride + * else: + * for i in range(extent): # <<<<<<<<<<<<<< + * _slice_assign_scalar(data, shape + 1, strides + 1, ndim - 1, itemsize, item) + * data += stride + */ + /*else*/ { + __pyx_t_2 = __pyx_v_extent; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_v_i = __pyx_t_4; + + /* "View.MemoryView":1413 + * else: + * for i in range(extent): + * _slice_assign_scalar(data, shape + 1, strides + 1, ndim - 1, itemsize, item) # <<<<<<<<<<<<<< + * data += stride + * + */ + __pyx_memoryview__slice_assign_scalar(__pyx_v_data, (__pyx_v_shape + 1), (__pyx_v_strides + 1), (__pyx_v_ndim - 1), __pyx_v_itemsize, __pyx_v_item); + + /* "View.MemoryView":1414 + * for i in range(extent): + * _slice_assign_scalar(data, shape + 1, strides + 1, ndim - 1, itemsize, item) + * data += stride # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_data = (__pyx_v_data + __pyx_v_stride); + } + } + __pyx_L3:; + + /* "View.MemoryView":1400 + * + * @cname('__pyx_memoryview__slice_assign_scalar') + * cdef void _slice_assign_scalar(char *data, Py_ssize_t *shape, # <<<<<<<<<<<<<< + * Py_ssize_t *strides, int ndim, + * size_t itemsize, void *item) noexcept nogil: + */ + + /* function exit code */ +} + +/* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyMethodDef __pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum = {"__pyx_unpickle_Enum", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; +static PyObject 
*__pyx_pw_15View_dot_MemoryView_1__pyx_unpickle_Enum(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v___pyx_type = 0; + long __pyx_v___pyx_checksum; + PyObject *__pyx_v___pyx_state = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[3] = {0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(1, 1, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_pyx_type,&__pyx_n_s_pyx_checksum,&__pyx_n_s_pyx_state,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_type)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_checksum)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 1); __PYX_ERR(1, 1, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_pyx_state)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[2]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, 2); __PYX_ERR(1, 1, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "__pyx_unpickle_Enum") < 0)) __PYX_ERR(1, 1, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 3)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); + } + __pyx_v___pyx_type = values[0]; + __pyx_v___pyx_checksum = __Pyx_PyInt_As_long(values[1]); if (unlikely((__pyx_v___pyx_checksum == (long)-1) && PyErr_Occurred())) __PYX_ERR(1, 1, __pyx_L3_error) + __pyx_v___pyx_state = values[2]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("__pyx_unpickle_Enum", 1, 3, 3, __pyx_nargs); __PYX_ERR(1, 1, __pyx_L3_error) + 
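/* Wrong positional-argument count: jump to the shared error path, which drops any borrowed argument references and returns NULL. */ +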
goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(__pyx_self, __pyx_v___pyx_type, __pyx_v___pyx_checksum, __pyx_v___pyx_state); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_15View_dot_MemoryView___pyx_unpickle_Enum(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v___pyx_type, long __pyx_v___pyx_checksum, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_v___pyx_PickleError = 0; + PyObject *__pyx_v___pyx_result = 0; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum", 0); + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum + */ + __pyx_t_1 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = (__Pyx_PySequence_ContainsTF(__pyx_t_1, __pyx_tuple__8, Py_NE)); if (unlikely((__pyx_t_2 < 0))) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + if (__pyx_t_2) { + + /* "(tree fragment)":5 + * cdef object __pyx_result + * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): + * from pickle import PickleError as __pyx_PickleError # <<<<<<<<<<<<<< + * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum + * __pyx_result = Enum.__new__(__pyx_type) + */ + __pyx_t_1 = PyList_New(1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_n_s_PickleError); + __Pyx_GIVEREF(__pyx_n_s_PickleError); + if (__Pyx_PyList_SET_ITEM(__pyx_t_1, 0, __pyx_n_s_PickleError)) __PYX_ERR(1, 5, __pyx_L1_error); + __pyx_t_3 = __Pyx_Import(__pyx_n_s_pickle, __pyx_t_1, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = __Pyx_ImportFrom(__pyx_t_3, __pyx_n_s_PickleError); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 5, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_INCREF(__pyx_t_1); + __pyx_v___pyx_PickleError = __pyx_t_1; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + + /* "(tree fragment)":6 + * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum # 
<<<<<<<<<<<<<< + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + */ + __pyx_t_3 = __Pyx_PyInt_From_long(__pyx_v___pyx_checksum); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = __Pyx_PyString_Format(__pyx_kp_s_Incompatible_checksums_0x_x_vs_0, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 6, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_Raise(__pyx_v___pyx_PickleError, __pyx_t_1, 0, 0); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __PYX_ERR(1, 6, __pyx_L1_error) + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum + */ + } + + /* "(tree fragment)":7 + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum + * __pyx_result = Enum.__new__(__pyx_type) # <<<<<<<<<<<<<< + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + */ + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_MemviewEnum_type), __pyx_n_s_new); if (unlikely(!__pyx_t_3)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = NULL; + __pyx_t_5 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_3))) { + __pyx_t_4 = PyMethod_GET_SELF(__pyx_t_3); + if (likely(__pyx_t_4)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_3); + __Pyx_INCREF(__pyx_t_4); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_3, function); + __pyx_t_5 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_4, __pyx_v___pyx_type}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_3, __pyx_callargs+1-__pyx_t_5, 1+__pyx_t_5); + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 7, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + } + __pyx_v___pyx_result = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":8 + * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + */ + __pyx_t_2 = (__pyx_v___pyx_state != Py_None); + if (__pyx_t_2) { + + /* "(tree fragment)":9 + * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) # <<<<<<<<<<<<<< + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + */ + if (!(likely(PyTuple_CheckExact(__pyx_v___pyx_state))||((__pyx_v___pyx_state) == Py_None) || __Pyx_RaiseUnexpectedTypeError("tuple", __pyx_v___pyx_state))) __PYX_ERR(1, 9, __pyx_L1_error) + __pyx_t_1 = __pyx_unpickle_Enum__set_state(((struct __pyx_MemviewEnum_obj *)__pyx_v___pyx_result), ((PyObject*)__pyx_v___pyx_state)); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":8 + * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum 
+ * __pyx_result = Enum.__new__(__pyx_type) + * if __pyx_state is not None: # <<<<<<<<<<<<<< + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + */ + } + + /* "(tree fragment)":10 + * if __pyx_state is not None: + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result # <<<<<<<<<<<<<< + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v___pyx_result); + __pyx_r = __pyx_v___pyx_result; + goto __pyx_L0; + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v___pyx_PickleError); + __Pyx_XDECREF(__pyx_v___pyx_result); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + +static PyObject *__pyx_unpickle_Enum__set_state(struct __pyx_MemviewEnum_obj *__pyx_v___pyx_result, PyObject *__pyx_v___pyx_state) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_t_2; + Py_ssize_t __pyx_t_3; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + int __pyx_t_8; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__pyx_unpickle_Enum__set_state", 0); + + /* "(tree fragment)":12 + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] # <<<<<<<<<<<<<< + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 12, __pyx_L1_error) + } + __pyx_t_1 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 12, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_1); + __Pyx_GOTREF(__pyx_v___pyx_result->name); + __Pyx_DECREF(__pyx_v___pyx_result->name); + __pyx_v___pyx_result->name = __pyx_t_1; + __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "object of type 'NoneType' has no len()"); + __PYX_ERR(1, 13, __pyx_L1_error) + } + __pyx_t_3 = __Pyx_PyTuple_GET_SIZE(__pyx_v___pyx_state); if (unlikely(__pyx_t_3 == ((Py_ssize_t)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_4 = (__pyx_t_3 > 1); + if (__pyx_t_4) { + } 
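/* Short-circuit "and": skip the hasattr() test below when "len(__pyx_state) > 1" is false. */ +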
else { + __pyx_t_2 = __pyx_t_4; + goto __pyx_L4_bool_binop_done; + } + __pyx_t_4 = __Pyx_HasAttr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(1, 13, __pyx_L1_error) + __pyx_t_2 = __pyx_t_4; + __pyx_L4_bool_binop_done:; + if (__pyx_t_2) { + + /* "(tree fragment)":14 + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + * __pyx_result.__dict__.update(__pyx_state[1]) # <<<<<<<<<<<<<< + */ + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(((PyObject *)__pyx_v___pyx_result), __pyx_n_s_dict); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_update); if (unlikely(!__pyx_t_6)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(__pyx_v___pyx_state == Py_None)) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not subscriptable"); + __PYX_ERR(1, 14, __pyx_L1_error) + } + __pyx_t_5 = __Pyx_GetItemInt_Tuple(__pyx_v___pyx_state, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 1); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_7 = NULL; + __pyx_t_8 = 0; + #if CYTHON_UNPACK_METHODS + if (likely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_7 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_7)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_7); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + __pyx_t_8 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_7, __pyx_t_5}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_8, 1+__pyx_t_8); + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "(tree fragment)":13 + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): # <<<<<<<<<<<<<< + * __pyx_result.__dict__.update(__pyx_state[1]) + */ + } + + /* "(tree fragment)":11 + * __pyx_unpickle_Enum__set_state( __pyx_result, __pyx_state) + * return __pyx_result + * cdef __pyx_unpickle_Enum__set_state(Enum __pyx_result, tuple __pyx_state): # <<<<<<<<<<<<<< + * __pyx_result.name = __pyx_state[0] + * if len(__pyx_state) > 1 and hasattr(__pyx_result, '__dict__'): + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_AddTraceback("View.MemoryView.__pyx_unpickle_Enum__set_state", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":245 + * + * @property + * cdef inline PyObject* base(self) nogil: # <<<<<<<<<<<<<< + * """Returns a borrowed reference to the object owning the data/memory. 
+ * """ + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_7ndarray_4base_base(PyArrayObject *__pyx_v_self) { + PyObject *__pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":248 + * """Returns a borrowed reference to the object owning the data/memory. + * """ + * return PyArray_BASE(self) # <<<<<<<<<<<<<< + * + * @property + */ + __pyx_r = PyArray_BASE(__pyx_v_self); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":245 + * + * @property + * cdef inline PyObject* base(self) nogil: # <<<<<<<<<<<<<< + * """Returns a borrowed reference to the object owning the data/memory. + * """ + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":251 + * + * @property + * cdef inline dtype descr(self): # <<<<<<<<<<<<<< + * """Returns an owned reference to the dtype of the array. + * """ + */ + +static CYTHON_INLINE PyArray_Descr *__pyx_f_5numpy_7ndarray_5descr_descr(PyArrayObject *__pyx_v_self) { + PyArray_Descr *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyArray_Descr *__pyx_t_1; + __Pyx_RefNannySetupContext("descr", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":254 + * """Returns an owned reference to the dtype of the array. + * """ + * return PyArray_DESCR(self) # <<<<<<<<<<<<<< + * + * @property + */ + __Pyx_XDECREF((PyObject *)__pyx_r); + __pyx_t_1 = PyArray_DESCR(__pyx_v_self); + __Pyx_INCREF((PyObject *)((PyArray_Descr *)__pyx_t_1)); + __pyx_r = ((PyArray_Descr *)__pyx_t_1); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":251 + * + * @property + * cdef inline dtype descr(self): # <<<<<<<<<<<<<< + * """Returns an owned reference to the dtype of the array. + * """ + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF((PyObject *)__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":257 + * + * @property + * cdef inline int ndim(self) nogil: # <<<<<<<<<<<<<< + * """Returns the number of dimensions in the array. + * """ + */ + +static CYTHON_INLINE int __pyx_f_5numpy_7ndarray_4ndim_ndim(PyArrayObject *__pyx_v_self) { + int __pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":260 + * """Returns the number of dimensions in the array. + * """ + * return PyArray_NDIM(self) # <<<<<<<<<<<<<< + * + * @property + */ + __pyx_r = PyArray_NDIM(__pyx_v_self); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":257 + * + * @property + * cdef inline int ndim(self) nogil: # <<<<<<<<<<<<<< + * """Returns the number of dimensions in the array. + * """ + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":263 + * + * @property + * cdef inline npy_intp *shape(self) nogil: # <<<<<<<<<<<<<< + * """Returns a pointer to the dimensions/shape of the array. + * The number of elements matches the number of dimensions of the array (ndim). 
+ */ + +static CYTHON_INLINE npy_intp *__pyx_f_5numpy_7ndarray_5shape_shape(PyArrayObject *__pyx_v_self) { + npy_intp *__pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":268 + * Can return NULL for 0-dimensional arrays. + * """ + * return PyArray_DIMS(self) # <<<<<<<<<<<<<< + * + * @property + */ + __pyx_r = PyArray_DIMS(__pyx_v_self); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":263 + * + * @property + * cdef inline npy_intp *shape(self) nogil: # <<<<<<<<<<<<<< + * """Returns a pointer to the dimensions/shape of the array. + * The number of elements matches the number of dimensions of the array (ndim). + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":271 + * + * @property + * cdef inline npy_intp *strides(self) nogil: # <<<<<<<<<<<<<< + * """Returns a pointer to the strides of the array. + * The number of elements matches the number of dimensions of the array (ndim). + */ + +static CYTHON_INLINE npy_intp *__pyx_f_5numpy_7ndarray_7strides_strides(PyArrayObject *__pyx_v_self) { + npy_intp *__pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":275 + * The number of elements matches the number of dimensions of the array (ndim). + * """ + * return PyArray_STRIDES(self) # <<<<<<<<<<<<<< + * + * @property + */ + __pyx_r = PyArray_STRIDES(__pyx_v_self); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":271 + * + * @property + * cdef inline npy_intp *strides(self) nogil: # <<<<<<<<<<<<<< + * """Returns a pointer to the strides of the array. + * The number of elements matches the number of dimensions of the array (ndim). + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":278 + * + * @property + * cdef inline npy_intp size(self) nogil: # <<<<<<<<<<<<<< + * """Returns the total size (in number of elements) of the array. + * """ + */ + +static CYTHON_INLINE npy_intp __pyx_f_5numpy_7ndarray_4size_size(PyArrayObject *__pyx_v_self) { + npy_intp __pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":281 + * """Returns the total size (in number of elements) of the array. + * """ + * return PyArray_SIZE(self) # <<<<<<<<<<<<<< + * + * @property + */ + __pyx_r = PyArray_SIZE(__pyx_v_self); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":278 + * + * @property + * cdef inline npy_intp size(self) nogil: # <<<<<<<<<<<<<< + * """Returns the total size (in number of elements) of the array. + * """ + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":284 + * + * @property + * cdef inline char* data(self) nogil: # <<<<<<<<<<<<<< + * """The pointer to the data buffer as a char*. + * This is provided for legacy reasons to avoid direct struct field access. 
+ */ + +static CYTHON_INLINE char *__pyx_f_5numpy_7ndarray_4data_data(PyArrayObject *__pyx_v_self) { + char *__pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":290 + * of `PyArray_DATA()` instead, which returns a 'void*'. + * """ + * return PyArray_BYTES(self) # <<<<<<<<<<<<<< + * + * ctypedef unsigned char npy_bool + */ + __pyx_r = PyArray_BYTES(__pyx_v_self); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":284 + * + * @property + * cdef inline char* data(self) nogil: # <<<<<<<<<<<<<< + * """The pointer to the data buffer as a char*. + * This is provided for legacy reasons to avoid direct struct field access. + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":776 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew1(PyObject *__pyx_v_a) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew1", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":777 + * + * cdef inline object PyArray_MultiIterNew1(a): + * return PyArray_MultiIterNew(1, a) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew2(a, b): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(1, ((void *)__pyx_v_a)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 777, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":776 + * ctypedef npy_cdouble complex_t + * + * cdef inline object PyArray_MultiIterNew1(a): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(1, a) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew1", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":779 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew2(PyObject *__pyx_v_a, PyObject *__pyx_v_b) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew2", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":780 + * + * cdef inline object PyArray_MultiIterNew2(a, b): + * return PyArray_MultiIterNew(2, a, b) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(2, ((void *)__pyx_v_a), ((void *)__pyx_v_b)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 780, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + 
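/* Transfer the owned reference to the new multi-iterator into the return slot. */ +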
__pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":779 + * return PyArray_MultiIterNew(1, a) + * + * cdef inline object PyArray_MultiIterNew2(a, b): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(2, a, b) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew2", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":782 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew3(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew3", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":783 + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): + * return PyArray_MultiIterNew(3, a, b, c) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(3, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 783, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":782 + * return PyArray_MultiIterNew(2, a, b) + * + * cdef inline object PyArray_MultiIterNew3(a, b, c): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(3, a, b, c) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew3", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":785 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew4(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew4", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":786 + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): + * return PyArray_MultiIterNew(4, a, b, c, d) # <<<<<<<<<<<<<< + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(4, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 786, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":785 + * return PyArray_MultiIterNew(3, a, b, c) + * + * cdef inline object PyArray_MultiIterNew4(a, b, c, d): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(4, a, b, c, d) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew4", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":788 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyArray_MultiIterNew5(PyObject *__pyx_v_a, PyObject *__pyx_v_b, PyObject *__pyx_v_c, PyObject *__pyx_v_d, PyObject *__pyx_v_e) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("PyArray_MultiIterNew5", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":789 + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): + * return PyArray_MultiIterNew(5, a, b, c, d, e) # <<<<<<<<<<<<<< + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_1 = PyArray_MultiIterNew(5, ((void *)__pyx_v_a), ((void *)__pyx_v_b), ((void *)__pyx_v_c), ((void *)__pyx_v_d), ((void *)__pyx_v_e)); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 789, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_r = __pyx_t_1; + __pyx_t_1 = 0; + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":788 + * return PyArray_MultiIterNew(4, a, b, c, d) + * + * cdef inline object PyArray_MultiIterNew5(a, b, c, d, e): # <<<<<<<<<<<<<< + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_AddTraceback("numpy.PyArray_MultiIterNew5", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = 0; + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":791 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_PyDataType_SHAPE(PyArray_Descr *__pyx_v_d) { + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("PyDataType_SHAPE", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":792 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + __pyx_t_1 = PyDataType_HASSUBARRAY(__pyx_v_d); + if (__pyx_t_1) { + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":793 + * cdef inline tuple 
PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape # <<<<<<<<<<<<<< + * else: + * return () + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject*)__pyx_v_d->subarray->shape)); + __pyx_r = ((PyObject*)__pyx_v_d->subarray->shape); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":792 + * + * cdef inline tuple PyDataType_SHAPE(dtype d): + * if PyDataType_HASSUBARRAY(d): # <<<<<<<<<<<<<< + * return d.subarray.shape + * else: + */ + } + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":795 + * return d.subarray.shape + * else: + * return () # <<<<<<<<<<<<<< + * + * + */ + /*else*/ { + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_empty_tuple); + __pyx_r = __pyx_empty_tuple; + goto __pyx_L0; + } + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":791 + * return PyArray_MultiIterNew(5, a, b, c, d, e) + * + * cdef inline tuple PyDataType_SHAPE(dtype d): # <<<<<<<<<<<<<< + * if PyDataType_HASSUBARRAY(d): + * return d.subarray.shape + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":970 + * int _import_umath() except -1 + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * Py_INCREF(base) # important to do this before stealing the reference below! + * PyArray_SetBaseObject(arr, base) + */ + +static CYTHON_INLINE void __pyx_f_5numpy_set_array_base(PyArrayObject *__pyx_v_arr, PyObject *__pyx_v_base) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("set_array_base", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":971 + * + * cdef inline void set_array_base(ndarray arr, object base): + * Py_INCREF(base) # important to do this before stealing the reference below! # <<<<<<<<<<<<<< + * PyArray_SetBaseObject(arr, base) + * + */ + Py_INCREF(__pyx_v_base); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":972 + * cdef inline void set_array_base(ndarray arr, object base): + * Py_INCREF(base) # important to do this before stealing the reference below! + * PyArray_SetBaseObject(arr, base) # <<<<<<<<<<<<<< + * + * cdef inline object get_array_base(ndarray arr): + */ + (void)(PyArray_SetBaseObject(__pyx_v_arr, __pyx_v_base)); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":970 + * int _import_umath() except -1 + * + * cdef inline void set_array_base(ndarray arr, object base): # <<<<<<<<<<<<<< + * Py_INCREF(base) # important to do this before stealing the reference below! 
+ * PyArray_SetBaseObject(arr, base) + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":974 + * PyArray_SetBaseObject(arr, base) + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * base = PyArray_BASE(arr) + * if base is NULL: + */ + +static CYTHON_INLINE PyObject *__pyx_f_5numpy_get_array_base(PyArrayObject *__pyx_v_arr) { + PyObject *__pyx_v_base; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + int __pyx_t_1; + __Pyx_RefNannySetupContext("get_array_base", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":975 + * + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) # <<<<<<<<<<<<<< + * if base is NULL: + * return None + */ + __pyx_v_base = PyArray_BASE(__pyx_v_arr); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":976 + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) + * if base is NULL: # <<<<<<<<<<<<<< + * return None + * return base + */ + __pyx_t_1 = (__pyx_v_base == NULL); + if (__pyx_t_1) { + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":977 + * base = PyArray_BASE(arr) + * if base is NULL: + * return None # <<<<<<<<<<<<<< + * return base + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":976 + * cdef inline object get_array_base(ndarray arr): + * base = PyArray_BASE(arr) + * if base is NULL: # <<<<<<<<<<<<<< + * return None + * return base + */ + } + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":978 + * if base is NULL: + * return None + * return base # <<<<<<<<<<<<<< + * + * # Versions of the import_* functions which are more suitable for + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(((PyObject *)__pyx_v_base)); + __pyx_r = ((PyObject *)__pyx_v_base); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":974 + * PyArray_SetBaseObject(arr, base) + * + * cdef inline object get_array_base(ndarray arr): # <<<<<<<<<<<<<< + * base = PyArray_BASE(arr) + * if base is NULL: + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":982 + * # Versions of the import_* functions which are more suitable for + * # Cython code. + * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * __pyx_import_array() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_array(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_array", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":983 + * # Cython code. 
+ * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":984 + * cdef inline int import_array() except -1: + * try: + * __pyx_import_array() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") + */ + __pyx_t_4 = _import_array(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 984, __pyx_L3_error) + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":983 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":985 + * try: + * __pyx_import_array() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.multiarray failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 985, __pyx_L5_except_error) + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_6); + __Pyx_XGOTREF(__pyx_t_7); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":986 + * __pyx_import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__9, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 986, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(2, 986, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":983 + * # Cython code. + * cdef inline int import_array() except -1: + * try: # <<<<<<<<<<<<<< + * __pyx_import_array() + * except Exception: + */ + __pyx_L5_except_error:; + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":982 + * # Versions of the import_* functions which are more suitable for + * # Cython code. 
+ * cdef inline int import_array() except -1: # <<<<<<<<<<<<<< + * try: + * __pyx_import_array() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_array", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":988 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_umath(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_umath", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":989 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":990 + * cdef inline int import_umath() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 990, __pyx_L3_error) + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":989 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":991 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject *)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 991, __pyx_L5_except_error) + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_6); + __Pyx_XGOTREF(__pyx_t_7); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":992 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 992, __pyx_L5_except_error) + 
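+ /* The cached tuple __pyx_tuple__10 holds the single message argument
+  * ("numpy.core.umath failed to import"); a fresh ImportError built from
+  * it is raised below in place of whatever _import_umath() failed with. */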
__Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(2, 992, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":989 + * + * cdef inline int import_umath() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __pyx_L5_except_error:; + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":988 + * raise ImportError("numpy.core.multiarray failed to import") + * + * cdef inline int import_umath() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_umath", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":994 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + +static CYTHON_INLINE int __pyx_f_5numpy_import_ufunc(void) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + int __pyx_t_4; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + PyObject *__pyx_t_7 = NULL; + PyObject *__pyx_t_8 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("import_ufunc", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":995 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":996 + * cdef inline int import_ufunc() except -1: + * try: + * _import_umath() # <<<<<<<<<<<<<< + * except Exception: + * raise ImportError("numpy.core.umath failed to import") + */ + __pyx_t_4 = _import_umath(); if (unlikely(__pyx_t_4 == ((int)-1))) __PYX_ERR(2, 996, __pyx_L3_error) + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":995 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L8_try_end; + __pyx_L3_error:; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":997 + * try: + * _import_umath() + * except Exception: # <<<<<<<<<<<<<< + * raise ImportError("numpy.core.umath failed to import") + * + */ + __pyx_t_4 = __Pyx_PyErr_ExceptionMatches(((PyObject 
*)(&((PyTypeObject*)PyExc_Exception)[0]))); + if (__pyx_t_4) { + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_6, &__pyx_t_7) < 0) __PYX_ERR(2, 997, __pyx_L5_except_error) + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_6); + __Pyx_XGOTREF(__pyx_t_7); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":998 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_8 = __Pyx_PyObject_Call(__pyx_builtin_ImportError, __pyx_tuple__10, NULL); if (unlikely(!__pyx_t_8)) __PYX_ERR(2, 998, __pyx_L5_except_error) + __Pyx_GOTREF(__pyx_t_8); + __Pyx_Raise(__pyx_t_8, 0, 0, 0); + __Pyx_DECREF(__pyx_t_8); __pyx_t_8 = 0; + __PYX_ERR(2, 998, __pyx_L5_except_error) + } + goto __pyx_L5_except_error; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":995 + * + * cdef inline int import_ufunc() except -1: + * try: # <<<<<<<<<<<<<< + * _import_umath() + * except Exception: + */ + __pyx_L5_except_error:; + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L8_try_end:; + } + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":994 + * raise ImportError("numpy.core.umath failed to import") + * + * cdef inline int import_ufunc() except -1: # <<<<<<<<<<<<<< + * try: + * _import_umath() + */ + + /* function exit code */ + __pyx_r = 0; + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_7); + __Pyx_XDECREF(__pyx_t_8); + __Pyx_AddTraceback("numpy.import_ufunc", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1001 + * + * + * cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<< + * """ + * Cython equivalent of `isinstance(obj, np.timedelta64)` + */ + +static CYTHON_INLINE int __pyx_f_5numpy_is_timedelta64_object(PyObject *__pyx_v_obj) { + int __pyx_r; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("is_timedelta64_object", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1013 + * bool + * """ + * return PyObject_TypeCheck(obj, &PyTimedeltaArrType_Type) # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyTimedeltaArrType_Type)); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1001 + * + * + * cdef inline bint is_timedelta64_object(object obj): # <<<<<<<<<<<<<< + * """ + * Cython equivalent of `isinstance(obj, np.timedelta64)` + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1016 + * + * + * cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<< + * """ + * Cython equivalent of `isinstance(obj, np.datetime64)` + */ + +static CYTHON_INLINE int __pyx_f_5numpy_is_datetime64_object(PyObject *__pyx_v_obj) { + int __pyx_r; + __Pyx_RefNannyDeclarations + 
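+ /* is_timedelta64_object/is_datetime64_object are the C-level
+  * equivalents of isinstance(obj, np.timedelta64/np.datetime64), as
+  * their docstrings state: a direct PyObject_TypeCheck against the
+  * numpy scalar array types, with no Python-level attribute lookup
+  * or call. */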
__Pyx_RefNannySetupContext("is_datetime64_object", 0); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1028 + * bool + * """ + * return PyObject_TypeCheck(obj, &PyDatetimeArrType_Type) # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = PyObject_TypeCheck(__pyx_v_obj, (&PyDatetimeArrType_Type)); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1016 + * + * + * cdef inline bint is_datetime64_object(object obj): # <<<<<<<<<<<<<< + * """ + * Cython equivalent of `isinstance(obj, np.datetime64)` + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1031 + * + * + * cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<< + * """ + * returns the int64 value underlying scalar numpy datetime64 object + */ + +static CYTHON_INLINE npy_datetime __pyx_f_5numpy_get_datetime64_value(PyObject *__pyx_v_obj) { + npy_datetime __pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1038 + * also needed. That can be found using `get_datetime64_unit`. + * """ + * return (obj).obval # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = ((PyDatetimeScalarObject *)__pyx_v_obj)->obval; + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1031 + * + * + * cdef inline npy_datetime get_datetime64_value(object obj) nogil: # <<<<<<<<<<<<<< + * """ + * returns the int64 value underlying scalar numpy datetime64 object + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1041 + * + * + * cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<< + * """ + * returns the int64 value underlying scalar numpy timedelta64 object + */ + +static CYTHON_INLINE npy_timedelta __pyx_f_5numpy_get_timedelta64_value(PyObject *__pyx_v_obj) { + npy_timedelta __pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1045 + * returns the int64 value underlying scalar numpy timedelta64 object + * """ + * return (obj).obval # <<<<<<<<<<<<<< + * + * + */ + __pyx_r = ((PyTimedeltaScalarObject *)__pyx_v_obj)->obval; + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1041 + * + * + * cdef inline npy_timedelta get_timedelta64_value(object obj) nogil: # <<<<<<<<<<<<<< + * """ + * returns the int64 value underlying scalar numpy timedelta64 object + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1048 + * + * + * cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<< + * """ + * returns the unit part of the dtype for a numpy datetime64 object. + */ + +static CYTHON_INLINE NPY_DATETIMEUNIT __pyx_f_5numpy_get_datetime64_unit(PyObject *__pyx_v_obj) { + NPY_DATETIMEUNIT __pyx_r; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1052 + * returns the unit part of the dtype for a numpy datetime64 object. 
+ * """ + * return (obj).obmeta.base # <<<<<<<<<<<<<< + */ + __pyx_r = ((NPY_DATETIMEUNIT)((PyDatetimeScalarObject *)__pyx_v_obj)->obmeta.base); + goto __pyx_L0; + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":1048 + * + * + * cdef inline NPY_DATETIMEUNIT get_datetime64_unit(object obj) nogil: # <<<<<<<<<<<<<< + * """ + * returns the unit part of the dtype for a numpy datetime64 object. + */ + + /* function exit code */ + __pyx_L0:; + return __pyx_r; +} + +/* "box_intersection.pyx":11 + * FLOAT = np.float32 + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def computeIntersection(cp1, cp2, s, e): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_16box_intersection_1computeIntersection(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +static PyMethodDef __pyx_mdef_16box_intersection_1computeIntersection = {"computeIntersection", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_16box_intersection_1computeIntersection, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}; +static PyObject *__pyx_pw_16box_intersection_1computeIntersection(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + PyObject *__pyx_v_cp1 = 0; + PyObject *__pyx_v_cp2 = 0; + PyObject *__pyx_v_s = 0; + PyObject *__pyx_v_e = 0; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[4] = {0,0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("computeIntersection (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(0, 11, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_cp1,&__pyx_n_s_cp2,&__pyx_n_s_s,&__pyx_n_s_e,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_cp1)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 11, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_cp2)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 11, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("computeIntersection", 1, 4, 4, 1); __PYX_ERR(0, 11, 
__pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_s)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[2]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 11, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("computeIntersection", 1, 4, 4, 2); __PYX_ERR(0, 11, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_e)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[3]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 11, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("computeIntersection", 1, 4, 4, 3); __PYX_ERR(0, 11, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "computeIntersection") < 0)) __PYX_ERR(0, 11, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 4)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); + values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); + } + __pyx_v_cp1 = values[0]; + __pyx_v_cp2 = values[1]; + __pyx_v_s = values[2]; + __pyx_v_e = values[3]; + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("computeIntersection", 1, 4, 4, __pyx_nargs); __PYX_ERR(0, 11, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_AddTraceback("box_intersection.computeIntersection", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_16box_intersection_computeIntersection(__pyx_self, __pyx_v_cp1, __pyx_v_cp2, __pyx_v_s, __pyx_v_e); + + /* function exit code */ + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_16box_intersection_computeIntersection(CYTHON_UNUSED PyObject *__pyx_self, PyObject *__pyx_v_cp1, PyObject *__pyx_v_cp2, PyObject *__pyx_v_s, PyObject *__pyx_v_e) { + PyObject *__pyx_v_dc = NULL; + PyObject *__pyx_v_dp = NULL; + PyObject *__pyx_v_n1 = NULL; + PyObject *__pyx_v_n2 = NULL; + PyObject *__pyx_v_n3 = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("computeIntersection", 0); + + /* "box_intersection.pyx":14 + * @cython.wraparound(False) + * def computeIntersection(cp1, cp2, s, e): + * dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ] # <<<<<<<<<<<<<< + * dp = [ s[0] - e[0], s[1] - e[1] ] + * n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + */ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cp1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = 
__Pyx_GetItemInt(__pyx_v_cp2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_cp1, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cp2, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = PyNumber_Subtract(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 14, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyList_SET_ITEM(__pyx_t_1, 0, __pyx_t_3)) __PYX_ERR(0, 14, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyList_SET_ITEM(__pyx_t_1, 1, __pyx_t_4)) __PYX_ERR(0, 14, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_4 = 0; + __pyx_v_dc = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "box_intersection.pyx":15 + * def computeIntersection(cp1, cp2, s, e): + * dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ] + * dp = [ s[0] - e[0], s[1] - e[1] ] # <<<<<<<<<<<<<< + * n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + * n2 = s[0] * e[1] - s[1] * e[0] + */ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_s, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_e, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_s, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_e, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyNumber_Subtract(__pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyList_New(2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 15, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyList_SET_ITEM(__pyx_t_1, 0, __pyx_t_3)) __PYX_ERR(0, 15, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyList_SET_ITEM(__pyx_t_1, 1, __pyx_t_2)) __PYX_ERR(0, 15, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_2 = 0; + __pyx_v_dp = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "box_intersection.pyx":16 + * dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ] + * dp = [ s[0] - e[0], s[1] - e[1] ] + * n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] # <<<<<<<<<<<<<< + * n2 = s[0] * e[1] - s[1] * e[0] + * n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + */ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cp1, 0, 
long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_cp2, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_cp1, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cp2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = PyNumber_Multiply(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyNumber_Subtract(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 16, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_n1 = __pyx_t_1; + __pyx_t_1 = 0; + + /* "box_intersection.pyx":17 + * dp = [ s[0] - e[0], s[1] - e[1] ] + * n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + * n2 = s[0] * e[1] - s[1] * e[0] # <<<<<<<<<<<<<< + * n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + * return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3] + */ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_s, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_e, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_t_4); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_s, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_e, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyNumber_Multiply(__pyx_t_4, __pyx_t_1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyNumber_Subtract(__pyx_t_3, __pyx_t_2); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 17, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_n2 = __pyx_t_1; + __pyx_t_1 = 0; + + /* "box_intersection.pyx":18 + * n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + * n2 = s[0] * e[1] - s[1] * e[0] + * n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) # <<<<<<<<<<<<<< + * return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3] + * + */ + __pyx_t_1 = PyNumber_Multiply(PyList_GET_ITEM(__pyx_v_dc, 0), PyList_GET_ITEM(__pyx_v_dp, 1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 18, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = 
PyNumber_Multiply(PyList_GET_ITEM(__pyx_v_dc, 1), PyList_GET_ITEM(__pyx_v_dp, 0)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 18, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyFloat_TrueDivideCObj(__pyx_float_1_0, __pyx_t_3, 1.0, 0, 1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 18, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_v_n3 = __pyx_t_2; + __pyx_t_2 = 0; + + /* "box_intersection.pyx":19 + * n2 = s[0] * e[1] - s[1] * e[0] + * n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + * return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3] # <<<<<<<<<<<<<< + * + * @cython.boundscheck(False) + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_2 = PyNumber_Multiply(__pyx_v_n1, PyList_GET_ITEM(__pyx_v_dp, 0)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Multiply(__pyx_v_n2, PyList_GET_ITEM(__pyx_v_dc, 0)); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_1 = PyNumber_Subtract(__pyx_t_2, __pyx_t_3); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = PyNumber_Multiply(__pyx_t_1, __pyx_v_n3); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyNumber_Multiply(__pyx_v_n1, PyList_GET_ITEM(__pyx_v_dp, 1)); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = PyNumber_Multiply(__pyx_v_n2, PyList_GET_ITEM(__pyx_v_dc, 1)); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = PyNumber_Subtract(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = PyNumber_Multiply(__pyx_t_4, __pyx_v_n3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyList_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 19, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_3); + if (__Pyx_PyList_SET_ITEM(__pyx_t_4, 0, __pyx_t_3)) __PYX_ERR(0, 19, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyList_SET_ITEM(__pyx_t_4, 1, __pyx_t_2)) __PYX_ERR(0, 19, __pyx_L1_error); + __pyx_t_3 = 0; + __pyx_t_2 = 0; + __pyx_r = __pyx_t_4; + __pyx_t_4 = 0; + goto __pyx_L0; + + /* "box_intersection.pyx":11 + * FLOAT = np.float32 + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def computeIntersection(cp1, cp2, s, e): + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_AddTraceback("box_intersection.computeIntersection", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_dc); + __Pyx_XDECREF(__pyx_v_dp); + __Pyx_XDECREF(__pyx_v_n1); + __Pyx_XDECREF(__pyx_v_n2); + __Pyx_XDECREF(__pyx_v_n3); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* 
"box_intersection.pyx":23 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef inline bint inside(cp1, cp2, p): # <<<<<<<<<<<<<< + * return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0]) + * + */ + +static CYTHON_INLINE int __pyx_f_16box_intersection_inside(PyObject *__pyx_v_cp1, PyObject *__pyx_v_cp2, PyObject *__pyx_v_p) { + int __pyx_r; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("inside", 0); + + /* "box_intersection.pyx":24 + * @cython.wraparound(False) + * cdef inline bint inside(cp1, cp2, p): + * return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0]) # <<<<<<<<<<<<<< + * + * @cython.boundscheck(False) + */ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cp2, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_cp1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = PyNumber_Subtract(__pyx_t_1, __pyx_t_2); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_GetItemInt(__pyx_v_p, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_cp1, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_4 = PyNumber_Subtract(__pyx_t_2, __pyx_t_1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyNumber_Multiply(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_cp2, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_cp1, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_2 = PyNumber_Subtract(__pyx_t_4, __pyx_t_3); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __pyx_t_3 = __Pyx_GetItemInt(__pyx_v_p, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_4 = __Pyx_GetItemInt(__pyx_v_cp1, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = PyNumber_Subtract(__pyx_t_3, __pyx_t_4); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyNumber_Multiply(__pyx_t_2, __pyx_t_5); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 24, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = PyObject_RichCompare(__pyx_t_1, __pyx_t_4, Py_GT); __Pyx_XGOTREF(__pyx_t_5); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_5); if (unlikely((__pyx_t_6 == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 24, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_r = __pyx_t_6; + goto __pyx_L0; + + /* "box_intersection.pyx":23 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef inline bint inside(cp1, cp2, p): # <<<<<<<<<<<<<< + * return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0]) + * + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_AddTraceback("box_intersection.inside", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "box_intersection.pyx":26 + * return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0]) + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * def polygon_clip_unnest(float [:, :] subjectPolygon, float [:, :] clipPolygon): + * """ Clip a polygon with another polygon. + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_16box_intersection_3polygon_clip_unnest(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_16box_intersection_2polygon_clip_unnest, " Clip a polygon with another polygon.\n\n Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python\n\n Args:\n subjectPolygon: a list of (x,y) 2d points, any polygon.\n clipPolygon: a list of (x,y) 2d points, has to be *convex*\n Note:\n **points have to be counter-clockwise ordered**\n\n Return:\n a list of (x,y) vertex point for the intersection polygon.\n "); +static PyMethodDef __pyx_mdef_16box_intersection_3polygon_clip_unnest = {"polygon_clip_unnest", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_16box_intersection_3polygon_clip_unnest, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_16box_intersection_2polygon_clip_unnest}; +static PyObject *__pyx_pw_16box_intersection_3polygon_clip_unnest(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + __Pyx_memviewslice __pyx_v_subjectPolygon = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_clipPolygon = { 0, 0, { 0 }, { 0 }, { 0 } }; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[2] = {0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("polygon_clip_unnest (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(0, 26, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = 
__Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_subjectPolygon,&__pyx_n_s_clipPolygon,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_subjectPolygon)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 26, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_clipPolygon)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 26, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("polygon_clip_unnest", 1, 2, 2, 1); __PYX_ERR(0, 26, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "polygon_clip_unnest") < 0)) __PYX_ERR(0, 26, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 2)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + } + __pyx_v_subjectPolygon = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_subjectPolygon.memview)) __PYX_ERR(0, 27, __pyx_L3_error) + __pyx_v_clipPolygon = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_clipPolygon.memview)) __PYX_ERR(0, 27, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("polygon_clip_unnest", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 26, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __PYX_XCLEAR_MEMVIEW(&__pyx_v_subjectPolygon, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_clipPolygon, 1); + __Pyx_AddTraceback("box_intersection.polygon_clip_unnest", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_16box_intersection_2polygon_clip_unnest(__pyx_self, __pyx_v_subjectPolygon, __pyx_v_clipPolygon); + + /* function exit code */ + __PYX_XCLEAR_MEMVIEW(&__pyx_v_subjectPolygon, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_clipPolygon, 1); + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_16box_intersection_2polygon_clip_unnest(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_subjectPolygon, __Pyx_memviewslice __pyx_v_clipPolygon) { + PyObject *__pyx_v_outputList = NULL; + __Pyx_memviewslice __pyx_v_cp1 = { 0, 0, { 0 }, { 0 }, { 0 } }; + int __pyx_v_lenc; + int __pyx_v_iidx; + int __pyx_v_cidx; + 
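+ /* For orientation, the Cython source these locals compile from,
+  * reconstructed from the "box_intersection.pyx" source comments
+  * embedded below (a sketch of the lines visible here, not the
+  * verbatim file; inline annotations are the editor's):
+  *
+  *   outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])]
+  *   cp1 = clipPolygon[-1]
+  *   cdef int lenc = len(clipPolygon)
+  *   cdef int iidx = 0
+  *   for cidx in range(lenc):            # one pass per convex clip edge
+  *       clipVertex = clipPolygon[cidx]
+  *       cp2 = clipVertex
+  *       inputList = outputList.copy()   # re-clip the running polygon
+  *       outputList.clear()
+  *       s = inputList[-1]
+  *       inc = len(inputList)
+  *       ...
+  */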
__Pyx_memviewslice __pyx_v_clipVertex = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_cp2 = { 0, 0, { 0 }, { 0 }, { 0 } }; + PyObject *__pyx_v_inputList = NULL; + PyObject *__pyx_v_s = NULL; + Py_ssize_t __pyx_v_inc; + PyObject *__pyx_v_subjectVertex = NULL; + PyObject *__pyx_v_e = NULL; + Py_ssize_t __pyx_7genexpr__pyx_v_x; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + __Pyx_memviewslice __pyx_t_5 = { 0, 0, { 0 }, { 0 }, { 0 } }; + PyObject *__pyx_t_6 = NULL; + int __pyx_t_7; + int __pyx_t_8; + int __pyx_t_9; + int __pyx_t_10; + int __pyx_t_11; + int __pyx_t_12; + PyObject *__pyx_t_13 = NULL; + PyObject *__pyx_t_14 = NULL; + PyObject *__pyx_t_15 = NULL; + int __pyx_t_16; + int __pyx_t_17; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("polygon_clip_unnest", 0); + + /* "box_intersection.pyx":41 + * a list of (x,y) vertex point for the intersection polygon. + * """ + * outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])] # <<<<<<<<<<<<<< + * cp1 = clipPolygon[-1] + * cdef int lenc = len(clipPolygon) + */ + { /* enter inner scope */ + __pyx_t_1 = PyList_New(0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 41, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_2 = (__pyx_v_subjectPolygon.shape[0]); + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_4 = 0; __pyx_t_4 < __pyx_t_3; __pyx_t_4+=1) { + __pyx_7genexpr__pyx_v_x = __pyx_t_4; + __pyx_t_5.data = __pyx_v_subjectPolygon.data; + __pyx_t_5.memview = __pyx_v_subjectPolygon.memview; + __PYX_INC_MEMVIEW(&__pyx_t_5, 1); + { + Py_ssize_t __pyx_tmp_idx = __pyx_7genexpr__pyx_v_x; + Py_ssize_t __pyx_tmp_shape = __pyx_v_subjectPolygon.shape[0]; + Py_ssize_t __pyx_tmp_stride = __pyx_v_subjectPolygon.strides[0]; + if (__pyx_tmp_idx < 0) + __pyx_tmp_idx += __pyx_tmp_shape; + __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_5.shape[0] = __pyx_v_subjectPolygon.shape[1]; +__pyx_t_5.strides[0] = __pyx_v_subjectPolygon.strides[1]; + __pyx_t_5.suboffsets[0] = -1; + +__pyx_t_6 = __pyx_memoryview_fromslice(__pyx_t_5, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 41, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __PYX_XCLEAR_MEMVIEW(&__pyx_t_5, 1); + __pyx_t_5.memview = NULL; __pyx_t_5.data = NULL; + if (unlikely(__Pyx_ListComp_Append(__pyx_t_1, (PyObject*)__pyx_t_6))) __PYX_ERR(0, 41, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + } /* exit inner scope */ + __pyx_v_outputList = ((PyObject*)__pyx_t_1); + __pyx_t_1 = 0; + + /* "box_intersection.pyx":42 + * """ + * outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])] + * cp1 = clipPolygon[-1] # <<<<<<<<<<<<<< + * cdef int lenc = len(clipPolygon) + * cdef int iidx = 0 + */ + __pyx_t_5.data = __pyx_v_clipPolygon.data; + __pyx_t_5.memview = __pyx_v_clipPolygon.memview; + __PYX_INC_MEMVIEW(&__pyx_t_5, 1); + { + Py_ssize_t __pyx_tmp_idx = -1L; + Py_ssize_t __pyx_tmp_shape = __pyx_v_clipPolygon.shape[0]; + Py_ssize_t __pyx_tmp_stride = __pyx_v_clipPolygon.strides[0]; + if (__pyx_tmp_idx < 0) + __pyx_tmp_idx += __pyx_tmp_shape; + __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_5.shape[0] = __pyx_v_clipPolygon.shape[1]; +__pyx_t_5.strides[0] = __pyx_v_clipPolygon.strides[1]; + __pyx_t_5.suboffsets[0] = -1; + +__pyx_v_cp1 = __pyx_t_5; + 
__pyx_t_5.memview = NULL; + __pyx_t_5.data = NULL; + + /* "box_intersection.pyx":43 + * outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])] + * cp1 = clipPolygon[-1] + * cdef int lenc = len(clipPolygon) # <<<<<<<<<<<<<< + * cdef int iidx = 0 + * + */ + __pyx_t_2 = __Pyx_MemoryView_Len(__pyx_v_clipPolygon); + __pyx_v_lenc = __pyx_t_2; + + /* "box_intersection.pyx":44 + * cp1 = clipPolygon[-1] + * cdef int lenc = len(clipPolygon) + * cdef int iidx = 0 # <<<<<<<<<<<<<< + * + * # for clipVertex in clipPolygon: + */ + __pyx_v_iidx = 0; + + /* "box_intersection.pyx":47 + * + * # for clipVertex in clipPolygon: + * for cidx in range(lenc): # <<<<<<<<<<<<<< + * clipVertex = clipPolygon[cidx] + * cp2 = clipVertex + */ + __pyx_t_7 = __pyx_v_lenc; + __pyx_t_8 = __pyx_t_7; + for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_cidx = __pyx_t_9; + + /* "box_intersection.pyx":48 + * # for clipVertex in clipPolygon: + * for cidx in range(lenc): + * clipVertex = clipPolygon[cidx] # <<<<<<<<<<<<<< + * cp2 = clipVertex + * inputList = outputList.copy() + */ + __pyx_t_5.data = __pyx_v_clipPolygon.data; + __pyx_t_5.memview = __pyx_v_clipPolygon.memview; + __PYX_INC_MEMVIEW(&__pyx_t_5, 1); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_cidx; + Py_ssize_t __pyx_tmp_shape = __pyx_v_clipPolygon.shape[0]; + Py_ssize_t __pyx_tmp_stride = __pyx_v_clipPolygon.strides[0]; + if (__pyx_tmp_idx < 0) + __pyx_tmp_idx += __pyx_tmp_shape; + __pyx_t_5.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_5.shape[0] = __pyx_v_clipPolygon.shape[1]; +__pyx_t_5.strides[0] = __pyx_v_clipPolygon.strides[1]; + __pyx_t_5.suboffsets[0] = -1; + +__PYX_XCLEAR_MEMVIEW(&__pyx_v_clipVertex, 1); + __pyx_v_clipVertex = __pyx_t_5; + __pyx_t_5.memview = NULL; + __pyx_t_5.data = NULL; + + /* "box_intersection.pyx":49 + * for cidx in range(lenc): + * clipVertex = clipPolygon[cidx] + * cp2 = clipVertex # <<<<<<<<<<<<<< + * inputList = outputList.copy() + * outputList.clear() + */ + __PYX_XCLEAR_MEMVIEW(&__pyx_v_cp2, 1); + __PYX_INC_MEMVIEW(&__pyx_v_clipVertex, 1); + __pyx_v_cp2 = __pyx_v_clipVertex; + + /* "box_intersection.pyx":50 + * clipVertex = clipPolygon[cidx] + * cp2 = clipVertex + * inputList = outputList.copy() # <<<<<<<<<<<<<< + * outputList.clear() + * s = inputList[-1] + */ + __pyx_t_1 = __Pyx_CallUnboundCMethod0(&__pyx_umethod_PyList_Type_copy, __pyx_v_outputList); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 50, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XDECREF_SET(__pyx_v_inputList, __pyx_t_1); + __pyx_t_1 = 0; + + /* "box_intersection.pyx":51 + * cp2 = clipVertex + * inputList = outputList.copy() + * outputList.clear() # <<<<<<<<<<<<<< + * s = inputList[-1] + * + */ + __pyx_t_1 = __Pyx_CallUnboundCMethod0(&__pyx_umethod_PyList_Type_clear, __pyx_v_outputList); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 51, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "box_intersection.pyx":52 + * inputList = outputList.copy() + * outputList.clear() + * s = inputList[-1] # <<<<<<<<<<<<<< + * + * inc = len(inputList) + */ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_inputList, -1L, long, 1, __Pyx_PyInt_From_long, 0, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 52, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XDECREF_SET(__pyx_v_s, __pyx_t_1); + __pyx_t_1 = 0; + + /* "box_intersection.pyx":54 + * s = inputList[-1] + * + * inc = len(inputList) # <<<<<<<<<<<<<< + * + * # for subjectVertex in inputList: + */ + __pyx_t_2 = PyObject_Length(__pyx_v_inputList); if 
(unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 54, __pyx_L1_error) + __pyx_v_inc = __pyx_t_2; + + /* "box_intersection.pyx":57 + * + * # for subjectVertex in inputList: + * for iidx in range(inc): # <<<<<<<<<<<<<< + * subjectVertex = inputList[iidx] + * e = subjectVertex + */ + __pyx_t_2 = __pyx_v_inc; + __pyx_t_3 = __pyx_t_2; + for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_3; __pyx_t_10+=1) { + __pyx_v_iidx = __pyx_t_10; + + /* "box_intersection.pyx":58 + * # for subjectVertex in inputList: + * for iidx in range(inc): + * subjectVertex = inputList[iidx] # <<<<<<<<<<<<<< + * e = subjectVertex + * if inside(cp1, cp2, e): + */ + __pyx_t_1 = __Pyx_GetItemInt(__pyx_v_inputList, __pyx_v_iidx, int, 1, __Pyx_PyInt_From_int, 0, 1, 0); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 58, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_XDECREF_SET(__pyx_v_subjectVertex, __pyx_t_1); + __pyx_t_1 = 0; + + /* "box_intersection.pyx":59 + * for iidx in range(inc): + * subjectVertex = inputList[iidx] + * e = subjectVertex # <<<<<<<<<<<<<< + * if inside(cp1, cp2, e): + * if not inside(cp1, cp2, s): + */ + __Pyx_INCREF(__pyx_v_subjectVertex); + __Pyx_XDECREF_SET(__pyx_v_e, __pyx_v_subjectVertex); + + /* "box_intersection.pyx":60 + * subjectVertex = inputList[iidx] + * e = subjectVertex + * if inside(cp1, cp2, e): # <<<<<<<<<<<<<< + * if not inside(cp1, cp2, s): + * outputList.append(computeIntersection(cp1, cp2, s, e)) + */ + __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_cp1, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_cp2, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_11 = __pyx_f_16box_intersection_inside(__pyx_t_1, __pyx_t_6, __pyx_v_e); if (unlikely(__pyx_t_11 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 60, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (__pyx_t_11) { + + /* "box_intersection.pyx":61 + * e = subjectVertex + * if inside(cp1, cp2, e): + * if not inside(cp1, cp2, s): # <<<<<<<<<<<<<< + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * outputList.append(e) + */ + __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_cp1, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_cp2, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_11 = __pyx_f_16box_intersection_inside(__pyx_t_6, __pyx_t_1, __pyx_v_s); if (unlikely(__pyx_t_11 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 61, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_12 = (!__pyx_t_11); + if (__pyx_t_12) { + + /* "box_intersection.pyx":62 + * if inside(cp1, cp2, e): + * if not inside(cp1, cp2, s): + * outputList.append(computeIntersection(cp1, cp2, s, e)) # <<<<<<<<<<<<<< + * outputList.append(e) + * elif inside(cp1, cp2, s): + */ + __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_computeIntersection); if 
(unlikely(!__pyx_t_6)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_13 = __pyx_memoryview_fromslice(__pyx_v_cp1, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_13); + __pyx_t_14 = __pyx_memoryview_fromslice(__pyx_v_cp2, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __pyx_t_15 = NULL; + __pyx_t_16 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_6))) { + __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_6); + if (likely(__pyx_t_15)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_6); + __Pyx_INCREF(__pyx_t_15); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_6, function); + __pyx_t_16 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[5] = {__pyx_t_15, __pyx_t_13, __pyx_t_14, __pyx_v_s, __pyx_v_e}; + __pyx_t_1 = __Pyx_PyObject_FastCall(__pyx_t_6, __pyx_callargs+1-__pyx_t_16, 4+__pyx_t_16); + __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; + __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + } + __pyx_t_17 = __Pyx_PyList_Append(__pyx_v_outputList, __pyx_t_1); if (unlikely(__pyx_t_17 == ((int)-1))) __PYX_ERR(0, 62, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + + /* "box_intersection.pyx":61 + * e = subjectVertex + * if inside(cp1, cp2, e): + * if not inside(cp1, cp2, s): # <<<<<<<<<<<<<< + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * outputList.append(e) + */ + } + + /* "box_intersection.pyx":63 + * if not inside(cp1, cp2, s): + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * outputList.append(e) # <<<<<<<<<<<<<< + * elif inside(cp1, cp2, s): + * outputList.append(computeIntersection(cp1, cp2, s, e)) + */ + __pyx_t_17 = __Pyx_PyList_Append(__pyx_v_outputList, __pyx_v_e); if (unlikely(__pyx_t_17 == ((int)-1))) __PYX_ERR(0, 63, __pyx_L1_error) + + /* "box_intersection.pyx":60 + * subjectVertex = inputList[iidx] + * e = subjectVertex + * if inside(cp1, cp2, e): # <<<<<<<<<<<<<< + * if not inside(cp1, cp2, s): + * outputList.append(computeIntersection(cp1, cp2, s, e)) + */ + goto __pyx_L9; + } + + /* "box_intersection.pyx":64 + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * outputList.append(e) + * elif inside(cp1, cp2, s): # <<<<<<<<<<<<<< + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * s = e + */ + __pyx_t_1 = __pyx_memoryview_fromslice(__pyx_v_cp1, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_cp2, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_12 = __pyx_f_16box_intersection_inside(__pyx_t_1, __pyx_t_6, __pyx_v_s); if (unlikely(__pyx_t_12 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 64, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (__pyx_t_12) { + + /* "box_intersection.pyx":65 + * 
outputList.append(e) + * elif inside(cp1, cp2, s): + * outputList.append(computeIntersection(cp1, cp2, s, e)) # <<<<<<<<<<<<<< + * s = e + * cp1 = cp2 + */ + __Pyx_GetModuleGlobalName(__pyx_t_1, __pyx_n_s_computeIntersection); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_t_14 = __pyx_memoryview_fromslice(__pyx_v_cp1, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_14)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_14); + __pyx_t_13 = __pyx_memoryview_fromslice(__pyx_v_cp2, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_13)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_13); + __pyx_t_15 = NULL; + __pyx_t_16 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_1))) { + __pyx_t_15 = PyMethod_GET_SELF(__pyx_t_1); + if (likely(__pyx_t_15)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_1); + __Pyx_INCREF(__pyx_t_15); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_1, function); + __pyx_t_16 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[5] = {__pyx_t_15, __pyx_t_14, __pyx_t_13, __pyx_v_s, __pyx_v_e}; + __pyx_t_6 = __Pyx_PyObject_FastCall(__pyx_t_1, __pyx_callargs+1-__pyx_t_16, 4+__pyx_t_16); + __Pyx_XDECREF(__pyx_t_15); __pyx_t_15 = 0; + __Pyx_DECREF(__pyx_t_14); __pyx_t_14 = 0; + __Pyx_DECREF(__pyx_t_13); __pyx_t_13 = 0; + if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + } + __pyx_t_17 = __Pyx_PyList_Append(__pyx_v_outputList, __pyx_t_6); if (unlikely(__pyx_t_17 == ((int)-1))) __PYX_ERR(0, 65, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + + /* "box_intersection.pyx":64 + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * outputList.append(e) + * elif inside(cp1, cp2, s): # <<<<<<<<<<<<<< + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * s = e + */ + } + __pyx_L9:; + + /* "box_intersection.pyx":66 + * elif inside(cp1, cp2, s): + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * s = e # <<<<<<<<<<<<<< + * cp1 = cp2 + * if len(outputList) == 0: + */ + __Pyx_INCREF(__pyx_v_e); + __Pyx_DECREF_SET(__pyx_v_s, __pyx_v_e); + } + + /* "box_intersection.pyx":67 + * outputList.append(computeIntersection(cp1, cp2, s, e)) + * s = e + * cp1 = cp2 # <<<<<<<<<<<<<< + * if len(outputList) == 0: + * break + */ + __PYX_XCLEAR_MEMVIEW(&__pyx_v_cp1, 1); + __PYX_INC_MEMVIEW(&__pyx_v_cp2, 1); + __pyx_v_cp1 = __pyx_v_cp2; + + /* "box_intersection.pyx":68 + * s = e + * cp1 = cp2 + * if len(outputList) == 0: # <<<<<<<<<<<<<< + * break + * return outputList + */ + __pyx_t_2 = __Pyx_PyList_GET_SIZE(__pyx_v_outputList); if (unlikely(__pyx_t_2 == ((Py_ssize_t)-1))) __PYX_ERR(0, 68, __pyx_L1_error) + __pyx_t_12 = (__pyx_t_2 == 0); + if (__pyx_t_12) { + + /* "box_intersection.pyx":69 + * cp1 = cp2 + * if len(outputList) == 0: + * break # <<<<<<<<<<<<<< + * return outputList + * + */ + goto __pyx_L6_break; + + /* "box_intersection.pyx":68 + * s = e + * cp1 = cp2 + * if len(outputList) == 0: # <<<<<<<<<<<<<< + * break + * return outputList + */ + } + } + __pyx_L6_break:; + + /* "box_intersection.pyx":70 + * if len(outputList) == 0: + * break + * return outputList # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __Pyx_INCREF(__pyx_v_outputList); + __pyx_r = __pyx_v_outputList; + goto __pyx_L0; + + /* 
"box_intersection.pyx":26 + * return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0]) + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * def polygon_clip_unnest(float [:, :] subjectPolygon, float [:, :] clipPolygon): + * """ Clip a polygon with another polygon. + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __PYX_XCLEAR_MEMVIEW(&__pyx_t_5, 1); + __Pyx_XDECREF(__pyx_t_6); + __Pyx_XDECREF(__pyx_t_13); + __Pyx_XDECREF(__pyx_t_14); + __Pyx_XDECREF(__pyx_t_15); + __Pyx_AddTraceback("box_intersection.polygon_clip_unnest", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_outputList); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_cp1, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_clipVertex, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_cp2, 1); + __Pyx_XDECREF(__pyx_v_inputList); + __Pyx_XDECREF(__pyx_v_s); + __Pyx_XDECREF(__pyx_v_subjectVertex); + __Pyx_XDECREF(__pyx_v_e); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "box_intersection.pyx":75 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void copy_points(float[:, :] src, float[:, :] dst, Py_ssize_t num_points): # <<<<<<<<<<<<<< + * cdef Py_ssize_t i + * for i in range(num_points): + */ + +static void __pyx_f_16box_intersection_copy_points(__Pyx_memviewslice __pyx_v_src, __Pyx_memviewslice __pyx_v_dst, Py_ssize_t __pyx_v_num_points) { + Py_ssize_t __pyx_v_i; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + Py_ssize_t __pyx_t_7; + __Pyx_RefNannySetupContext("copy_points", 0); + + /* "box_intersection.pyx":77 + * cdef void copy_points(float[:, :] src, float[:, :] dst, Py_ssize_t num_points): + * cdef Py_ssize_t i + * for i in range(num_points): # <<<<<<<<<<<<<< + * dst[i][0] = src[i][0] + * dst[i][1] = src[i][1] + */ + __pyx_t_1 = __pyx_v_num_points; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_i = __pyx_t_3; + + /* "box_intersection.pyx":78 + * cdef Py_ssize_t i + * for i in range(num_points): + * dst[i][0] = src[i][0] # <<<<<<<<<<<<<< + * dst[i][1] = src[i][1] + * + */ + __pyx_t_4 = __pyx_v_i; + __pyx_t_5 = 0; + __pyx_t_6 = __pyx_v_i; + __pyx_t_7 = 0; + *((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_dst.data + __pyx_t_6 * __pyx_v_dst.strides[0]) ) + __pyx_t_7 * __pyx_v_dst.strides[1]) )) = (*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_src.data + __pyx_t_4 * __pyx_v_src.strides[0]) ) + __pyx_t_5 * __pyx_v_src.strides[1]) ))); + + /* "box_intersection.pyx":79 + * for i in range(num_points): + * dst[i][0] = src[i][0] + * dst[i][1] = src[i][1] # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_5 = __pyx_v_i; + __pyx_t_4 = 1; + __pyx_t_7 = __pyx_v_i; + __pyx_t_6 = 1; + *((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_dst.data + __pyx_t_7 * __pyx_v_dst.strides[0]) ) + __pyx_t_6 * __pyx_v_dst.strides[1]) )) = (*((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_src.data + __pyx_t_5 * __pyx_v_src.strides[0]) ) + __pyx_t_4 * __pyx_v_src.strides[1]) ))); + } + + /* "box_intersection.pyx":75 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef void copy_points(float[:, :] src, float[:, :] dst, Py_ssize_t num_points): # <<<<<<<<<<<<<< + * cdef Py_ssize_t i + * for i in range(num_points): + */ + + /* function exit code */ + __Pyx_RefNannyFinishContext(); +} + +/* "box_intersection.pyx":84 + * @cython.boundscheck(False) + * 
@cython.wraparound(False) + * cdef inline Py_ssize_t add_point(float[:, :] arr, float[:] point, Py_ssize_t num_points): # <<<<<<<<<<<<<< + * # assert num_points < arr.shape[0] - 1 + * # for j in range(dim): + */ + +static CYTHON_INLINE Py_ssize_t __pyx_f_16box_intersection_add_point(__Pyx_memviewslice __pyx_v_arr, __Pyx_memviewslice __pyx_v_point, Py_ssize_t __pyx_v_num_points) { + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + __Pyx_RefNannySetupContext("add_point", 0); + + /* "box_intersection.pyx":87 + * # assert num_points < arr.shape[0] - 1 + * # for j in range(dim): + * arr[num_points][0] = point[0] # <<<<<<<<<<<<<< + * arr[num_points][1] = point[1] + * num_points = num_points + 1 + */ + __pyx_t_1 = 0; + __pyx_t_2 = __pyx_v_num_points; + __pyx_t_3 = 0; + *((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_arr.data + __pyx_t_2 * __pyx_v_arr.strides[0]) ) + __pyx_t_3 * __pyx_v_arr.strides[1]) )) = (*((float *) ( /* dim=0 */ (__pyx_v_point.data + __pyx_t_1 * __pyx_v_point.strides[0]) ))); + + /* "box_intersection.pyx":88 + * # for j in range(dim): + * arr[num_points][0] = point[0] + * arr[num_points][1] = point[1] # <<<<<<<<<<<<<< + * num_points = num_points + 1 + * return num_points + */ + __pyx_t_1 = 1; + __pyx_t_3 = __pyx_v_num_points; + __pyx_t_2 = 1; + *((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_arr.data + __pyx_t_3 * __pyx_v_arr.strides[0]) ) + __pyx_t_2 * __pyx_v_arr.strides[1]) )) = (*((float *) ( /* dim=0 */ (__pyx_v_point.data + __pyx_t_1 * __pyx_v_point.strides[0]) ))); + + /* "box_intersection.pyx":89 + * arr[num_points][0] = point[0] + * arr[num_points][1] = point[1] + * num_points = num_points + 1 # <<<<<<<<<<<<<< + * return num_points + * + */ + __pyx_v_num_points = (__pyx_v_num_points + 1); + + /* "box_intersection.pyx":90 + * arr[num_points][1] = point[1] + * num_points = num_points + 1 + * return num_points # <<<<<<<<<<<<<< + * + * @cython.boundscheck(False) + */ + __pyx_r = __pyx_v_num_points; + goto __pyx_L0; + + /* "box_intersection.pyx":84 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef inline Py_ssize_t add_point(float[:, :] arr, float[:] point, Py_ssize_t num_points): # <<<<<<<<<<<<<< + * # assert num_points < arr.shape[0] - 1 + * # for j in range(dim): + */ + + /* function exit code */ + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "box_intersection.pyx":94 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef Py_ssize_t computeIntersection_and_add(float[:] cp1, float[:] cp2, float[:] s, float[:] e, float[:, :] arr, Py_ssize_t num_points): # <<<<<<<<<<<<<< + * # dc_np = np.zeros(2, dtype=np.float32) + * cdef float[2] dc + */ + +static Py_ssize_t __pyx_f_16box_intersection_computeIntersection_and_add(__Pyx_memviewslice __pyx_v_cp1, __Pyx_memviewslice __pyx_v_cp2, __Pyx_memviewslice __pyx_v_s, __Pyx_memviewslice __pyx_v_e, __Pyx_memviewslice __pyx_v_arr, Py_ssize_t __pyx_v_num_points) { + float __pyx_v_dc[2]; + float __pyx_v_dp[2]; + float __pyx_v_n1; + float __pyx_v_n2; + float __pyx_v_n3; + Py_ssize_t __pyx_r; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + float __pyx_t_5; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("computeIntersection_and_add", 0); + + /* "box_intersection.pyx":97 + * # dc_np = np.zeros(2, dtype=np.float32) + * cdef float[2] dc + * dc[0] = cp1[0] - 
cp2[0] # <<<<<<<<<<<<<< + * dc[1] = cp1[1] - cp2[1] + * + */ + __pyx_t_1 = 0; + __pyx_t_2 = 0; + (__pyx_v_dc[0]) = ((*((float *) ( /* dim=0 */ (__pyx_v_cp1.data + __pyx_t_1 * __pyx_v_cp1.strides[0]) ))) - (*((float *) ( /* dim=0 */ (__pyx_v_cp2.data + __pyx_t_2 * __pyx_v_cp2.strides[0]) )))); + + /* "box_intersection.pyx":98 + * cdef float[2] dc + * dc[0] = cp1[0] - cp2[0] + * dc[1] = cp1[1] - cp2[1] # <<<<<<<<<<<<<< + * + * # dp_np = np.zeros(2, dtype=np.float32) + */ + __pyx_t_2 = 1; + __pyx_t_1 = 1; + (__pyx_v_dc[1]) = ((*((float *) ( /* dim=0 */ (__pyx_v_cp1.data + __pyx_t_2 * __pyx_v_cp1.strides[0]) ))) - (*((float *) ( /* dim=0 */ (__pyx_v_cp2.data + __pyx_t_1 * __pyx_v_cp2.strides[0]) )))); + + /* "box_intersection.pyx":102 + * # dp_np = np.zeros(2, dtype=np.float32) + * cdef float[2] dp + * dp[0] = s[0] - e[0] # <<<<<<<<<<<<<< + * dp[1] = s[1] - e[1] + * + */ + __pyx_t_1 = 0; + __pyx_t_2 = 0; + (__pyx_v_dp[0]) = ((*((float *) ( /* dim=0 */ (__pyx_v_s.data + __pyx_t_1 * __pyx_v_s.strides[0]) ))) - (*((float *) ( /* dim=0 */ (__pyx_v_e.data + __pyx_t_2 * __pyx_v_e.strides[0]) )))); + + /* "box_intersection.pyx":103 + * cdef float[2] dp + * dp[0] = s[0] - e[0] + * dp[1] = s[1] - e[1] # <<<<<<<<<<<<<< + * + * cdef float n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + */ + __pyx_t_2 = 1; + __pyx_t_1 = 1; + (__pyx_v_dp[1]) = ((*((float *) ( /* dim=0 */ (__pyx_v_s.data + __pyx_t_2 * __pyx_v_s.strides[0]) ))) - (*((float *) ( /* dim=0 */ (__pyx_v_e.data + __pyx_t_1 * __pyx_v_e.strides[0]) )))); + + /* "box_intersection.pyx":105 + * dp[1] = s[1] - e[1] + * + * cdef float n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] # <<<<<<<<<<<<<< + * cdef float n2 = s[0] * e[1] - s[1] * e[0] + * cdef float n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + */ + __pyx_t_1 = 0; + __pyx_t_2 = 1; + __pyx_t_3 = 1; + __pyx_t_4 = 0; + __pyx_v_n1 = (((*((float *) ( /* dim=0 */ (__pyx_v_cp1.data + __pyx_t_1 * __pyx_v_cp1.strides[0]) ))) * (*((float *) ( /* dim=0 */ (__pyx_v_cp2.data + __pyx_t_2 * __pyx_v_cp2.strides[0]) )))) - ((*((float *) ( /* dim=0 */ (__pyx_v_cp1.data + __pyx_t_3 * __pyx_v_cp1.strides[0]) ))) * (*((float *) ( /* dim=0 */ (__pyx_v_cp2.data + __pyx_t_4 * __pyx_v_cp2.strides[0]) ))))); + + /* "box_intersection.pyx":106 + * + * cdef float n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + * cdef float n2 = s[0] * e[1] - s[1] * e[0] # <<<<<<<<<<<<<< + * cdef float n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + * + */ + __pyx_t_4 = 0; + __pyx_t_3 = 1; + __pyx_t_2 = 1; + __pyx_t_1 = 0; + __pyx_v_n2 = (((*((float *) ( /* dim=0 */ (__pyx_v_s.data + __pyx_t_4 * __pyx_v_s.strides[0]) ))) * (*((float *) ( /* dim=0 */ (__pyx_v_e.data + __pyx_t_3 * __pyx_v_e.strides[0]) )))) - ((*((float *) ( /* dim=0 */ (__pyx_v_s.data + __pyx_t_2 * __pyx_v_s.strides[0]) ))) * (*((float *) ( /* dim=0 */ (__pyx_v_e.data + __pyx_t_1 * __pyx_v_e.strides[0]) ))))); + + /* "box_intersection.pyx":107 + * cdef float n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + * cdef float n2 = s[0] * e[1] - s[1] * e[0] + * cdef float n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) # <<<<<<<<<<<<<< + * + * arr[num_points][0] = (n1*dp[0] - n2*dc[0]) * n3 + */ + __pyx_t_5 = (((__pyx_v_dc[0]) * (__pyx_v_dp[1])) - ((__pyx_v_dc[1]) * (__pyx_v_dp[0]))); + if (unlikely(__pyx_t_5 == 0)) { + PyErr_SetString(PyExc_ZeroDivisionError, "float division"); + __PYX_ERR(0, 107, __pyx_L1_error) + } + __pyx_v_n3 = (1.0 / ((double)__pyx_t_5)); + + /* "box_intersection.pyx":109 + * cdef float n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + * + * arr[num_points][0] = (n1*dp[0] - n2*dc[0]) * n3 # <<<<<<<<<<<<<< 
+ * arr[num_points][1] = (n1*dp[1] - n2*dc[1]) * n3 + * num_points = num_points + 1 + */ + __pyx_t_1 = __pyx_v_num_points; + __pyx_t_2 = 0; + *((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_arr.data + __pyx_t_1 * __pyx_v_arr.strides[0]) ) + __pyx_t_2 * __pyx_v_arr.strides[1]) )) = (((__pyx_v_n1 * (__pyx_v_dp[0])) - (__pyx_v_n2 * (__pyx_v_dc[0]))) * __pyx_v_n3); + + /* "box_intersection.pyx":110 + * + * arr[num_points][0] = (n1*dp[0] - n2*dc[0]) * n3 + * arr[num_points][1] = (n1*dp[1] - n2*dc[1]) * n3 # <<<<<<<<<<<<<< + * num_points = num_points + 1 + * + */ + __pyx_t_2 = __pyx_v_num_points; + __pyx_t_1 = 1; + *((float *) ( /* dim=1 */ (( /* dim=0 */ (__pyx_v_arr.data + __pyx_t_2 * __pyx_v_arr.strides[0]) ) + __pyx_t_1 * __pyx_v_arr.strides[1]) )) = (((__pyx_v_n1 * (__pyx_v_dp[1])) - (__pyx_v_n2 * (__pyx_v_dc[1]))) * __pyx_v_n3); + + /* "box_intersection.pyx":111 + * arr[num_points][0] = (n1*dp[0] - n2*dc[0]) * n3 + * arr[num_points][1] = (n1*dp[1] - n2*dc[1]) * n3 + * num_points = num_points + 1 # <<<<<<<<<<<<<< + * + * return num_points + */ + __pyx_v_num_points = (__pyx_v_num_points + 1); + + /* "box_intersection.pyx":113 + * num_points = num_points + 1 + * + * return num_points # <<<<<<<<<<<<<< + * + * @cython.boundscheck(False) + */ + __pyx_r = __pyx_v_num_points; + goto __pyx_L0; + + /* "box_intersection.pyx":94 + * @cython.boundscheck(False) + * @cython.wraparound(False) + * cdef Py_ssize_t computeIntersection_and_add(float[:] cp1, float[:] cp2, float[:] s, float[:] e, float[:, :] arr, Py_ssize_t num_points): # <<<<<<<<<<<<<< + * # dc_np = np.zeros(2, dtype=np.float32) + * cdef float[2] dc + */ + + /* function exit code */ + __pyx_L1_error:; + __Pyx_AddTraceback("box_intersection.computeIntersection_and_add", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = -1; + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "box_intersection.pyx":115 + * return num_points + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def polygon_clip_float(float [:, :] subjectPolygon, float [:, :] clipPolygon): + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_16box_intersection_5polygon_clip_float(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_16box_intersection_4polygon_clip_float, "\n Assumes subjectPolygon and clipPolygon have 4 vertices\n "); +static PyMethodDef __pyx_mdef_16box_intersection_5polygon_clip_float = {"polygon_clip_float", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_16box_intersection_5polygon_clip_float, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_16box_intersection_4polygon_clip_float}; +static PyObject *__pyx_pw_16box_intersection_5polygon_clip_float(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + __Pyx_memviewslice __pyx_v_subjectPolygon = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_clipPolygon = { 0, 0, { 0 }, { 0 }, { 0 } }; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[2] = {0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + 
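+/* Math note for computeIntersection_and_add above: it intersects the infinite
+ * lines through (cp1, cp2) and (s, e) via the standard 2D determinant form.
+ * Writing a x b = a[0]*b[1] - a[1]*b[0]:
+ *
+ *   dc = cp1 - cp2,  dp = s - e
+ *   n1 = cp1 x cp2,  n2 = s x e,  n3 = 1 / (dc x dp)
+ *   intersection = ((n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3)
+ *
+ * dc x dp == 0 means the two lines are parallel; the pyx:107 guard above maps
+ * that case to a ZeroDivisionError rather than returning a point.
+ */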
__Pyx_RefNannySetupContext("polygon_clip_float (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(0, 115, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_subjectPolygon,&__pyx_n_s_clipPolygon,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_subjectPolygon)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 115, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_clipPolygon)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 115, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("polygon_clip_float", 1, 2, 2, 1); __PYX_ERR(0, 115, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "polygon_clip_float") < 0)) __PYX_ERR(0, 115, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 2)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + } + __pyx_v_subjectPolygon = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_subjectPolygon.memview)) __PYX_ERR(0, 117, __pyx_L3_error) + __pyx_v_clipPolygon = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_clipPolygon.memview)) __PYX_ERR(0, 117, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("polygon_clip_float", 1, 2, 2, __pyx_nargs); __PYX_ERR(0, 115, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __PYX_XCLEAR_MEMVIEW(&__pyx_v_subjectPolygon, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_clipPolygon, 1); + __Pyx_AddTraceback("box_intersection.polygon_clip_float", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_16box_intersection_4polygon_clip_float(__pyx_self, __pyx_v_subjectPolygon, __pyx_v_clipPolygon); + + /* function exit code */ + __PYX_XCLEAR_MEMVIEW(&__pyx_v_subjectPolygon, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_clipPolygon, 1); + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject 
*__pyx_pf_16box_intersection_4polygon_clip_float(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_subjectPolygon, __Pyx_memviewslice __pyx_v_clipPolygon) { + Py_ssize_t __pyx_v_num_clip_points; + __Pyx_memviewslice __pyx_v_cp1 = { 0, 0, { 0 }, { 0 }, { 0 } }; + long __pyx_v_MAX_INTERSECT_POINTS; + CYTHON_UNUSED long __pyx_v_num_intersect_points; + PyObject *__pyx_v_outputList_np = NULL; + __Pyx_memviewslice __pyx_v_outputList = { 0, 0, { 0 }, { 0 }, { 0 } }; + PyObject *__pyx_v_inputList_np = NULL; + __Pyx_memviewslice __pyx_v_inputList = { 0, 0, { 0 }, { 0 }, { 0 } }; + Py_ssize_t __pyx_v_noutput_list; + Py_ssize_t __pyx_v_ninput_list; + Py_ssize_t __pyx_v_iidx; + Py_ssize_t __pyx_v_cidx; + __Pyx_memviewslice __pyx_v_clipVertex = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_cp2 = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_s = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_e = { 0, 0, { 0 }, { 0 }, { 0 } }; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + __Pyx_memviewslice __pyx_t_1 = { 0, 0, { 0 }, { 0 }, { 0 } }; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + PyObject *__pyx_t_6 = NULL; + __Pyx_memviewslice __pyx_t_7 = { 0, 0, { 0 }, { 0 }, { 0 } }; + Py_ssize_t __pyx_t_8; + Py_ssize_t __pyx_t_9; + Py_ssize_t __pyx_t_10; + Py_ssize_t __pyx_t_11; + Py_ssize_t __pyx_t_12; + Py_ssize_t __pyx_t_13; + int __pyx_t_14; + int __pyx_t_15; + Py_ssize_t __pyx_t_16; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("polygon_clip_float", 0); + + /* "box_intersection.pyx":121 + * Assumes subjectPolygon and clipPolygon have 4 vertices + * """ + * cdef Py_ssize_t num_clip_points = clipPolygon.shape[0] # <<<<<<<<<<<<<< + * cp1 = clipPolygon[num_clip_points - 1] + * + */ + __pyx_v_num_clip_points = (__pyx_v_clipPolygon.shape[0]); + + /* "box_intersection.pyx":122 + * """ + * cdef Py_ssize_t num_clip_points = clipPolygon.shape[0] + * cp1 = clipPolygon[num_clip_points - 1] # <<<<<<<<<<<<<< + * + * MAX_INTERSECT_POINTS = 10 + */ + __pyx_t_1.data = __pyx_v_clipPolygon.data; + __pyx_t_1.memview = __pyx_v_clipPolygon.memview; + __PYX_INC_MEMVIEW(&__pyx_t_1, 1); + { + Py_ssize_t __pyx_tmp_idx = (__pyx_v_num_clip_points - 1); + Py_ssize_t __pyx_tmp_stride = __pyx_v_clipPolygon.strides[0]; + __pyx_t_1.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_1.shape[0] = __pyx_v_clipPolygon.shape[1]; +__pyx_t_1.strides[0] = __pyx_v_clipPolygon.strides[1]; + __pyx_t_1.suboffsets[0] = -1; + +__pyx_v_cp1 = __pyx_t_1; + __pyx_t_1.memview = NULL; + __pyx_t_1.data = NULL; + + /* "box_intersection.pyx":124 + * cp1 = clipPolygon[num_clip_points - 1] + * + * MAX_INTERSECT_POINTS = 10 # <<<<<<<<<<<<<< + * num_intersect_points = 0 + * outputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) + */ + __pyx_v_MAX_INTERSECT_POINTS = 10; + + /* "box_intersection.pyx":125 + * + * MAX_INTERSECT_POINTS = 10 + * num_intersect_points = 0 # <<<<<<<<<<<<<< + * outputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) + * cdef float[:, :] outputList = outputList_np + */ + __pyx_v_num_intersect_points = 0; + + /* "box_intersection.pyx":126 + * MAX_INTERSECT_POINTS = 10 + * num_intersect_points = 0 + * outputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) # <<<<<<<<<<<<<< + * cdef float[:, :] outputList = outputList_np + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_2, __pyx_n_s_np); if 
(unlikely(!__pyx_t_2)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_3 = __Pyx_PyObject_GetAttrStr(__pyx_t_2, __pyx_n_s_zeros); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyInt_From_long(__pyx_v_MAX_INTERSECT_POINTS); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_4 = PyTuple_New(2); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 0, __pyx_t_2)) __PYX_ERR(0, 126, __pyx_L1_error); + __Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_4, 1, __pyx_int_2)) __PYX_ERR(0, 126, __pyx_L1_error); + __pyx_t_2 = 0; + __pyx_t_2 = PyTuple_New(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_4); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error); + __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_GetModuleGlobalName(__pyx_t_5, __pyx_n_s_np); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_6 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_float32); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + if (PyDict_SetItem(__pyx_t_4, __pyx_n_s_dtype, __pyx_t_6) < 0) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyObject_Call(__pyx_t_3, __pyx_t_2, __pyx_t_4); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 126, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_v_outputList_np = __pyx_t_6; + __pyx_t_6 = 0; + + /* "box_intersection.pyx":127 + * num_intersect_points = 0 + * outputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) + * cdef float[:, :] outputList = outputList_np # <<<<<<<<<<<<<< + * + * inputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) + */ + __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(__pyx_v_outputList_np, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 127, __pyx_L1_error) + __pyx_v_outputList = __pyx_t_7; + __pyx_t_7.memview = NULL; + __pyx_t_7.data = NULL; + + /* "box_intersection.pyx":129 + * cdef float[:, :] outputList = outputList_np + * + * inputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) # <<<<<<<<<<<<<< + * cdef float[:, :] inputList = inputList_np + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_6, __pyx_n_s_np); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_6, __pyx_n_s_zeros); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __pyx_t_6 = __Pyx_PyInt_From_long(__pyx_v_MAX_INTERSECT_POINTS); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GIVEREF(__pyx_t_6); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_t_6)) __PYX_ERR(0, 129, __pyx_L1_error); + 
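+/* Buffer-size note: the two MAX_INTERSECT_POINTS x 2 (i.e. 10 x 2) float32
+ * scratch arrays bound the vertex count of every intermediate polygon.
+ * Assuming both quads are convex (true for the box-face rectangles this
+ * module clips), each half-plane clip adds at most one vertex, so the count
+ * grows at most 4 -> 5 -> 6 -> 7 -> 8 and the 10 slots suffice; the
+ * commented-out assert in add_point (pyx:85 above) presumably guarded the
+ * same invariant.
+ */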
__Pyx_INCREF(__pyx_int_2); + __Pyx_GIVEREF(__pyx_int_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_int_2)) __PYX_ERR(0, 129, __pyx_L1_error); + __pyx_t_6 = 0; + __pyx_t_6 = PyTuple_New(1); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __Pyx_GIVEREF(__pyx_t_2); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_6, 0, __pyx_t_2)) __PYX_ERR(0, 129, __pyx_L1_error); + __pyx_t_2 = 0; + __pyx_t_2 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_GetModuleGlobalName(__pyx_t_3, __pyx_n_s_np); if (unlikely(!__pyx_t_3)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_3); + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_3, __pyx_n_s_float32); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_3); __pyx_t_3 = 0; + if (PyDict_SetItem(__pyx_t_2, __pyx_n_s_dtype, __pyx_t_5) < 0) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_5 = __Pyx_PyObject_Call(__pyx_t_4, __pyx_t_6, __pyx_t_2); if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 129, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __pyx_v_inputList_np = __pyx_t_5; + __pyx_t_5 = 0; + + /* "box_intersection.pyx":130 + * + * inputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) + * cdef float[:, :] inputList = inputList_np # <<<<<<<<<<<<<< + * + * copy_points(subjectPolygon, outputList, subjectPolygon.shape[0]) + */ + __pyx_t_7 = __Pyx_PyObject_to_MemoryviewSlice_dsds_float(__pyx_v_inputList_np, PyBUF_WRITABLE); if (unlikely(!__pyx_t_7.memview)) __PYX_ERR(0, 130, __pyx_L1_error) + __pyx_v_inputList = __pyx_t_7; + __pyx_t_7.memview = NULL; + __pyx_t_7.data = NULL; + + /* "box_intersection.pyx":132 + * cdef float[:, :] inputList = inputList_np + * + * copy_points(subjectPolygon, outputList, subjectPolygon.shape[0]) # <<<<<<<<<<<<<< + * cdef Py_ssize_t noutput_list = subjectPolygon.shape[0] + * cdef Py_ssize_t ninput_list = 0 + */ + __pyx_f_16box_intersection_copy_points(__pyx_v_subjectPolygon, __pyx_v_outputList, (__pyx_v_subjectPolygon.shape[0])); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 132, __pyx_L1_error) + + /* "box_intersection.pyx":133 + * + * copy_points(subjectPolygon, outputList, subjectPolygon.shape[0]) + * cdef Py_ssize_t noutput_list = subjectPolygon.shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t ninput_list = 0 + * cdef Py_ssize_t iidx = 0 + */ + __pyx_v_noutput_list = (__pyx_v_subjectPolygon.shape[0]); + + /* "box_intersection.pyx":134 + * copy_points(subjectPolygon, outputList, subjectPolygon.shape[0]) + * cdef Py_ssize_t noutput_list = subjectPolygon.shape[0] + * cdef Py_ssize_t ninput_list = 0 # <<<<<<<<<<<<<< + * cdef Py_ssize_t iidx = 0 + * + */ + __pyx_v_ninput_list = 0; + + /* "box_intersection.pyx":135 + * cdef Py_ssize_t noutput_list = subjectPolygon.shape[0] + * cdef Py_ssize_t ninput_list = 0 + * cdef Py_ssize_t iidx = 0 # <<<<<<<<<<<<<< + * + * for cidx in range(num_clip_points): + */ + __pyx_v_iidx = 0; + + /* "box_intersection.pyx":137 + * cdef Py_ssize_t iidx = 0 + * + * for cidx in range(num_clip_points): # <<<<<<<<<<<<<< + * clipVertex = clipPolygon[cidx] + * cp2 = clipVertex + */ + __pyx_t_8 = __pyx_v_num_clip_points; + __pyx_t_9 = __pyx_t_8; + for (__pyx_t_10 = 0; __pyx_t_10 < __pyx_t_9; __pyx_t_10+=1) { + __pyx_v_cidx = __pyx_t_10; + + /* "box_intersection.pyx":138 + * + * for cidx 
in range(num_clip_points): + * clipVertex = clipPolygon[cidx] # <<<<<<<<<<<<<< + * cp2 = clipVertex + * + */ + __pyx_t_1.data = __pyx_v_clipPolygon.data; + __pyx_t_1.memview = __pyx_v_clipPolygon.memview; + __PYX_INC_MEMVIEW(&__pyx_t_1, 1); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_cidx; + Py_ssize_t __pyx_tmp_stride = __pyx_v_clipPolygon.strides[0]; + __pyx_t_1.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_1.shape[0] = __pyx_v_clipPolygon.shape[1]; +__pyx_t_1.strides[0] = __pyx_v_clipPolygon.strides[1]; + __pyx_t_1.suboffsets[0] = -1; + +__PYX_XCLEAR_MEMVIEW(&__pyx_v_clipVertex, 1); + __pyx_v_clipVertex = __pyx_t_1; + __pyx_t_1.memview = NULL; + __pyx_t_1.data = NULL; + + /* "box_intersection.pyx":139 + * for cidx in range(num_clip_points): + * clipVertex = clipPolygon[cidx] + * cp2 = clipVertex # <<<<<<<<<<<<<< + * + * copy_points(outputList, inputList, noutput_list) + */ + __PYX_XCLEAR_MEMVIEW(&__pyx_v_cp2, 1); + __PYX_INC_MEMVIEW(&__pyx_v_clipVertex, 1); + __pyx_v_cp2 = __pyx_v_clipVertex; + + /* "box_intersection.pyx":141 + * cp2 = clipVertex + * + * copy_points(outputList, inputList, noutput_list) # <<<<<<<<<<<<<< + * ninput_list = noutput_list + * noutput_list = 0 + */ + __pyx_f_16box_intersection_copy_points(__pyx_v_outputList, __pyx_v_inputList, __pyx_v_noutput_list); if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 141, __pyx_L1_error) + + /* "box_intersection.pyx":142 + * + * copy_points(outputList, inputList, noutput_list) + * ninput_list = noutput_list # <<<<<<<<<<<<<< + * noutput_list = 0 + * + */ + __pyx_v_ninput_list = __pyx_v_noutput_list; + + /* "box_intersection.pyx":143 + * copy_points(outputList, inputList, noutput_list) + * ninput_list = noutput_list + * noutput_list = 0 # <<<<<<<<<<<<<< + * + * s = inputList[ninput_list - 1] + */ + __pyx_v_noutput_list = 0; + + /* "box_intersection.pyx":145 + * noutput_list = 0 + * + * s = inputList[ninput_list - 1] # <<<<<<<<<<<<<< + * + * for iidx in range(ninput_list): + */ + __pyx_t_1.data = __pyx_v_inputList.data; + __pyx_t_1.memview = __pyx_v_inputList.memview; + __PYX_INC_MEMVIEW(&__pyx_t_1, 1); + { + Py_ssize_t __pyx_tmp_idx = (__pyx_v_ninput_list - 1); + Py_ssize_t __pyx_tmp_stride = __pyx_v_inputList.strides[0]; + __pyx_t_1.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_1.shape[0] = __pyx_v_inputList.shape[1]; +__pyx_t_1.strides[0] = __pyx_v_inputList.strides[1]; + __pyx_t_1.suboffsets[0] = -1; + +__PYX_XCLEAR_MEMVIEW(&__pyx_v_s, 1); + __pyx_v_s = __pyx_t_1; + __pyx_t_1.memview = NULL; + __pyx_t_1.data = NULL; + + /* "box_intersection.pyx":147 + * s = inputList[ninput_list - 1] + * + * for iidx in range(ninput_list): # <<<<<<<<<<<<<< + * e = inputList[iidx] + * if inside(cp1, cp2, e): + */ + __pyx_t_11 = __pyx_v_ninput_list; + __pyx_t_12 = __pyx_t_11; + for (__pyx_t_13 = 0; __pyx_t_13 < __pyx_t_12; __pyx_t_13+=1) { + __pyx_v_iidx = __pyx_t_13; + + /* "box_intersection.pyx":148 + * + * for iidx in range(ninput_list): + * e = inputList[iidx] # <<<<<<<<<<<<<< + * if inside(cp1, cp2, e): + * if not inside(cp1, cp2, s): + */ + __pyx_t_1.data = __pyx_v_inputList.data; + __pyx_t_1.memview = __pyx_v_inputList.memview; + __PYX_INC_MEMVIEW(&__pyx_t_1, 1); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_iidx; + Py_ssize_t __pyx_tmp_stride = __pyx_v_inputList.strides[0]; + __pyx_t_1.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_1.shape[0] = __pyx_v_inputList.shape[1]; +__pyx_t_1.strides[0] = __pyx_v_inputList.strides[1]; + __pyx_t_1.suboffsets[0] = -1; + +__PYX_XCLEAR_MEMVIEW(&__pyx_v_e, 1); + __pyx_v_e = __pyx_t_1; + 
__pyx_t_1.memview = NULL; + __pyx_t_1.data = NULL; + + /* "box_intersection.pyx":149 + * for iidx in range(ninput_list): + * e = inputList[iidx] + * if inside(cp1, cp2, e): # <<<<<<<<<<<<<< + * if not inside(cp1, cp2, s): + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + */ + __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_cp1, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_cp2, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_e, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_14 = __pyx_f_16box_intersection_inside(__pyx_t_5, __pyx_t_2, __pyx_t_6); if (unlikely(__pyx_t_14 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 149, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (__pyx_t_14) { + + /* "box_intersection.pyx":150 + * e = inputList[iidx] + * if inside(cp1, cp2, e): + * if not inside(cp1, cp2, s): # <<<<<<<<<<<<<< + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + * + */ + __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_cp1, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_cp2, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_s, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_14 = __pyx_f_16box_intersection_inside(__pyx_t_6, __pyx_t_2, __pyx_t_5); if (unlikely(__pyx_t_14 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 150, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_15 = (!__pyx_t_14); + if (__pyx_t_15) { + + /* "box_intersection.pyx":151 + * if inside(cp1, cp2, e): + * if not inside(cp1, cp2, s): + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) # <<<<<<<<<<<<<< + * + * noutput_list = add_point(outputList, e, noutput_list) + */ + __pyx_t_16 = __pyx_f_16box_intersection_computeIntersection_and_add(__pyx_v_cp1, __pyx_v_cp2, __pyx_v_s, __pyx_v_e, __pyx_v_outputList, __pyx_v_noutput_list); if (unlikely(__pyx_t_16 == ((Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 151, __pyx_L1_error) + __pyx_v_noutput_list = __pyx_t_16; + + /* "box_intersection.pyx":150 + * e = inputList[iidx] + * if inside(cp1, cp2, e): + * if not inside(cp1, cp2, s): # <<<<<<<<<<<<<< + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + * + */ + } + + /* 
"box_intersection.pyx":153 + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + * + * noutput_list = add_point(outputList, e, noutput_list) # <<<<<<<<<<<<<< + * elif inside(cp1, cp2, s): + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + */ + __pyx_t_16 = __pyx_f_16box_intersection_add_point(__pyx_v_outputList, __pyx_v_e, __pyx_v_noutput_list); if (unlikely(__pyx_t_16 == ((Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 153, __pyx_L1_error) + __pyx_v_noutput_list = __pyx_t_16; + + /* "box_intersection.pyx":149 + * for iidx in range(ninput_list): + * e = inputList[iidx] + * if inside(cp1, cp2, e): # <<<<<<<<<<<<<< + * if not inside(cp1, cp2, s): + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + */ + goto __pyx_L7; + } + + /* "box_intersection.pyx":154 + * + * noutput_list = add_point(outputList, e, noutput_list) + * elif inside(cp1, cp2, s): # <<<<<<<<<<<<<< + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + * s = e + */ + __pyx_t_5 = __pyx_memoryview_fromslice(__pyx_v_cp1, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_5)) __PYX_ERR(0, 154, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_5); + __pyx_t_2 = __pyx_memoryview_fromslice(__pyx_v_cp2, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 154, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __pyx_t_6 = __pyx_memoryview_fromslice(__pyx_v_s, 1, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 154, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_15 = __pyx_f_16box_intersection_inside(__pyx_t_5, __pyx_t_2, __pyx_t_6); if (unlikely(__pyx_t_15 == ((int)-1) && PyErr_Occurred())) __PYX_ERR(0, 154, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_DECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_DECREF(__pyx_t_6); __pyx_t_6 = 0; + if (__pyx_t_15) { + + /* "box_intersection.pyx":155 + * noutput_list = add_point(outputList, e, noutput_list) + * elif inside(cp1, cp2, s): + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) # <<<<<<<<<<<<<< + * s = e + * cp1 = cp2 + */ + __pyx_t_16 = __pyx_f_16box_intersection_computeIntersection_and_add(__pyx_v_cp1, __pyx_v_cp2, __pyx_v_s, __pyx_v_e, __pyx_v_outputList, __pyx_v_noutput_list); if (unlikely(__pyx_t_16 == ((Py_ssize_t)-1) && PyErr_Occurred())) __PYX_ERR(0, 155, __pyx_L1_error) + __pyx_v_noutput_list = __pyx_t_16; + + /* "box_intersection.pyx":154 + * + * noutput_list = add_point(outputList, e, noutput_list) + * elif inside(cp1, cp2, s): # <<<<<<<<<<<<<< + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + * s = e + */ + } + __pyx_L7:; + + /* "box_intersection.pyx":156 + * elif inside(cp1, cp2, s): + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + * s = e # <<<<<<<<<<<<<< + * cp1 = cp2 + * if noutput_list == 0: + */ + __PYX_XCLEAR_MEMVIEW(&__pyx_v_s, 1); + __PYX_INC_MEMVIEW(&__pyx_v_e, 1); + __pyx_v_s = __pyx_v_e; + } + + /* "box_intersection.pyx":157 + * noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + * s = e + * cp1 = cp2 # <<<<<<<<<<<<<< + * if noutput_list == 0: + * break + */ + 
__PYX_XCLEAR_MEMVIEW(&__pyx_v_cp1, 1); + __PYX_INC_MEMVIEW(&__pyx_v_cp2, 1); + __pyx_v_cp1 = __pyx_v_cp2; + + /* "box_intersection.pyx":158 + * s = e + * cp1 = cp2 + * if noutput_list == 0: # <<<<<<<<<<<<<< + * break + * return outputList_np, noutput_list + */ + __pyx_t_15 = (__pyx_v_noutput_list == 0); + if (__pyx_t_15) { + + /* "box_intersection.pyx":159 + * cp1 = cp2 + * if noutput_list == 0: + * break # <<<<<<<<<<<<<< + * return outputList_np, noutput_list + * + */ + goto __pyx_L4_break; + + /* "box_intersection.pyx":158 + * s = e + * cp1 = cp2 + * if noutput_list == 0: # <<<<<<<<<<<<<< + * break + * return outputList_np, noutput_list + */ + } + } + __pyx_L4_break:; + + /* "box_intersection.pyx":160 + * if noutput_list == 0: + * break + * return outputList_np, noutput_list # <<<<<<<<<<<<<< + * + * + */ + __Pyx_XDECREF(__pyx_r); + __pyx_t_6 = PyInt_FromSsize_t(__pyx_v_noutput_list); if (unlikely(!__pyx_t_6)) __PYX_ERR(0, 160, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_6); + __pyx_t_2 = PyTuple_New(2); if (unlikely(!__pyx_t_2)) __PYX_ERR(0, 160, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_2); + __Pyx_INCREF(__pyx_v_outputList_np); + __Pyx_GIVEREF(__pyx_v_outputList_np); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 0, __pyx_v_outputList_np)) __PYX_ERR(0, 160, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_t_6); + if (__Pyx_PyTuple_SET_ITEM(__pyx_t_2, 1, __pyx_t_6)) __PYX_ERR(0, 160, __pyx_L1_error); + __pyx_t_6 = 0; + __pyx_r = __pyx_t_2; + __pyx_t_2 = 0; + goto __pyx_L0; + + /* "box_intersection.pyx":115 + * return num_points + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def polygon_clip_float(float [:, :] subjectPolygon, float [:, :] clipPolygon): + */ + + /* function exit code */ + __pyx_L1_error:; + __PYX_XCLEAR_MEMVIEW(&__pyx_t_1, 1); + __Pyx_XDECREF(__pyx_t_2); + __Pyx_XDECREF(__pyx_t_3); + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_6); + __PYX_XCLEAR_MEMVIEW(&__pyx_t_7, 1); + __Pyx_AddTraceback("box_intersection.polygon_clip_float", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __PYX_XCLEAR_MEMVIEW(&__pyx_v_cp1, 1); + __Pyx_XDECREF(__pyx_v_outputList_np); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_outputList, 1); + __Pyx_XDECREF(__pyx_v_inputList_np); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_inputList, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_clipVertex, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_cp2, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_s, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_e, 1); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +/* "box_intersection.pyx":164 + * + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def box_intersection(float [:, :, :, :] rect1, + */ + +/* Python wrapper */ +static PyObject *__pyx_pw_16box_intersection_7box_intersection(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +); /*proto*/ +PyDoc_STRVAR(__pyx_doc_16box_intersection_6box_intersection, "\n rect1 - B x K1 x 8 x 3 matrix of box corners\n rect2 - B x K2 x 8 x 3 matrix of box corners\n non_rot_inter_areas - intersection areas of boxes \n "); +static PyMethodDef __pyx_mdef_16box_intersection_7box_intersection = {"box_intersection", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw_16box_intersection_7box_intersection, __Pyx_METH_FASTCALL|METH_KEYWORDS, __pyx_doc_16box_intersection_6box_intersection}; +static PyObject 
*__pyx_pw_16box_intersection_7box_intersection(PyObject *__pyx_self, +#if CYTHON_METH_FASTCALL +PyObject *const *__pyx_args, Py_ssize_t __pyx_nargs, PyObject *__pyx_kwds +#else +PyObject *__pyx_args, PyObject *__pyx_kwds +#endif +) { + __Pyx_memviewslice __pyx_v_rect1 = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_rect2 = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_non_rot_inter_areas = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_nums_k2 = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_memviewslice __pyx_v_inter_areas = { 0, 0, { 0 }, { 0 }, { 0 } }; + int __pyx_v_approximate; + #if !CYTHON_METH_FASTCALL + CYTHON_UNUSED Py_ssize_t __pyx_nargs; + #endif + CYTHON_UNUSED PyObject *const *__pyx_kwvalues; + PyObject* values[6] = {0,0,0,0,0,0}; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + PyObject *__pyx_r = 0; + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("box_intersection (wrapper)", 0); + #if !CYTHON_METH_FASTCALL + #if CYTHON_ASSUME_SAFE_MACROS + __pyx_nargs = PyTuple_GET_SIZE(__pyx_args); + #else + __pyx_nargs = PyTuple_Size(__pyx_args); + if (unlikely((__pyx_nargs < 0))) __PYX_ERR(0, 164, __pyx_L3_error) + #endif + #endif + __pyx_kwvalues = __Pyx_KwValues_FASTCALL(__pyx_args, __pyx_nargs); + { + PyObject **__pyx_pyargnames[] = {&__pyx_n_s_rect1,&__pyx_n_s_rect2,&__pyx_n_s_non_rot_inter_areas,&__pyx_n_s_nums_k2,&__pyx_n_s_inter_areas,&__pyx_n_s_approximate,0}; + if (__pyx_kwds) { + Py_ssize_t kw_args; + switch (__pyx_nargs) { + case 6: values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); + CYTHON_FALLTHROUGH; + case 5: values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); + CYTHON_FALLTHROUGH; + case 4: values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); + CYTHON_FALLTHROUGH; + case 3: values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); + CYTHON_FALLTHROUGH; + case 2: values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + CYTHON_FALLTHROUGH; + case 1: values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + CYTHON_FALLTHROUGH; + case 0: break; + default: goto __pyx_L5_argtuple_error; + } + kw_args = __Pyx_NumKwargs_FASTCALL(__pyx_kwds); + switch (__pyx_nargs) { + case 0: + if (likely((values[0] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_rect1)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[0]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + else goto __pyx_L5_argtuple_error; + CYTHON_FALLTHROUGH; + case 1: + if (likely((values[1] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_rect2)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[1]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("box_intersection", 1, 6, 6, 1); __PYX_ERR(0, 164, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 2: + if (likely((values[2] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_non_rot_inter_areas)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[2]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("box_intersection", 1, 6, 6, 2); __PYX_ERR(0, 164, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 3: + if (likely((values[3] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_nums_k2)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[3]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + else { + 
__Pyx_RaiseArgtupleInvalid("box_intersection", 1, 6, 6, 3); __PYX_ERR(0, 164, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 4: + if (likely((values[4] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_inter_areas)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[4]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("box_intersection", 1, 6, 6, 4); __PYX_ERR(0, 164, __pyx_L3_error) + } + CYTHON_FALLTHROUGH; + case 5: + if (likely((values[5] = __Pyx_GetKwValue_FASTCALL(__pyx_kwds, __pyx_kwvalues, __pyx_n_s_approximate)) != 0)) { + (void)__Pyx_Arg_NewRef_FASTCALL(values[5]); + kw_args--; + } + else if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 164, __pyx_L3_error) + else { + __Pyx_RaiseArgtupleInvalid("box_intersection", 1, 6, 6, 5); __PYX_ERR(0, 164, __pyx_L3_error) + } + } + if (unlikely(kw_args > 0)) { + const Py_ssize_t kwd_pos_args = __pyx_nargs; + if (unlikely(__Pyx_ParseOptionalKeywords(__pyx_kwds, __pyx_kwvalues, __pyx_pyargnames, 0, values + 0, kwd_pos_args, "box_intersection") < 0)) __PYX_ERR(0, 164, __pyx_L3_error) + } + } else if (unlikely(__pyx_nargs != 6)) { + goto __pyx_L5_argtuple_error; + } else { + values[0] = __Pyx_Arg_FASTCALL(__pyx_args, 0); + values[1] = __Pyx_Arg_FASTCALL(__pyx_args, 1); + values[2] = __Pyx_Arg_FASTCALL(__pyx_args, 2); + values[3] = __Pyx_Arg_FASTCALL(__pyx_args, 3); + values[4] = __Pyx_Arg_FASTCALL(__pyx_args, 4); + values[5] = __Pyx_Arg_FASTCALL(__pyx_args, 5); + } + __pyx_v_rect1 = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(values[0], PyBUF_WRITABLE); if (unlikely(!__pyx_v_rect1.memview)) __PYX_ERR(0, 166, __pyx_L3_error) + __pyx_v_rect2 = __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(values[1], PyBUF_WRITABLE); if (unlikely(!__pyx_v_rect2.memview)) __PYX_ERR(0, 167, __pyx_L3_error) + __pyx_v_non_rot_inter_areas = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_float(values[2], PyBUF_WRITABLE); if (unlikely(!__pyx_v_non_rot_inter_areas.memview)) __PYX_ERR(0, 168, __pyx_L3_error) + __pyx_v_nums_k2 = __Pyx_PyObject_to_MemoryviewSlice_ds_int(values[3], PyBUF_WRITABLE); if (unlikely(!__pyx_v_nums_k2.memview)) __PYX_ERR(0, 169, __pyx_L3_error) + __pyx_v_inter_areas = __Pyx_PyObject_to_MemoryviewSlice_dsdsds_float(values[4], PyBUF_WRITABLE); if (unlikely(!__pyx_v_inter_areas.memview)) __PYX_ERR(0, 170, __pyx_L3_error) + __pyx_v_approximate = __Pyx_PyObject_IsTrue(values[5]); if (unlikely((__pyx_v_approximate == (int)-1) && PyErr_Occurred())) __PYX_ERR(0, 171, __pyx_L3_error) + } + goto __pyx_L4_argument_unpacking_done; + __pyx_L5_argtuple_error:; + __Pyx_RaiseArgtupleInvalid("box_intersection", 1, 6, 6, __pyx_nargs); __PYX_ERR(0, 164, __pyx_L3_error) + goto __pyx_L3_error; + __pyx_L3_error:; + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __PYX_XCLEAR_MEMVIEW(&__pyx_v_rect1, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_rect2, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_non_rot_inter_areas, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_nums_k2, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_inter_areas, 1); + __Pyx_AddTraceback("box_intersection.box_intersection", __pyx_clineno, __pyx_lineno, __pyx_filename); + __Pyx_RefNannyFinishContext(); + return NULL; + __pyx_L4_argument_unpacking_done:; + __pyx_r = __pyx_pf_16box_intersection_6box_intersection(__pyx_self, __pyx_v_rect1, __pyx_v_rect2, __pyx_v_non_rot_inter_areas, __pyx_v_nums_k2, __pyx_v_inter_areas, 
__pyx_v_approximate); + + /* function exit code */ + __PYX_XCLEAR_MEMVIEW(&__pyx_v_rect1, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_rect2, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_non_rot_inter_areas, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_nums_k2, 1); + __PYX_XCLEAR_MEMVIEW(&__pyx_v_inter_areas, 1); + { + Py_ssize_t __pyx_temp; + for (__pyx_temp=0; __pyx_temp < (Py_ssize_t)(sizeof(values)/sizeof(values[0])); ++__pyx_temp) { + __Pyx_Arg_XDECREF_FASTCALL(values[__pyx_temp]); + } + } + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} + +static PyObject *__pyx_pf_16box_intersection_6box_intersection(CYTHON_UNUSED PyObject *__pyx_self, __Pyx_memviewslice __pyx_v_rect1, __Pyx_memviewslice __pyx_v_rect2, __Pyx_memviewslice __pyx_v_non_rot_inter_areas, __Pyx_memviewslice __pyx_v_nums_k2, __Pyx_memviewslice __pyx_v_inter_areas, int __pyx_v_approximate) { + Py_ssize_t __pyx_v_B; + Py_ssize_t __pyx_v_K1; + Py_ssize_t __pyx_v_K2; + Py_ssize_t __pyx_v_b; + Py_ssize_t __pyx_v_k1; + Py_ssize_t __pyx_v_k2; + PyObject *__pyx_v_inter = NULL; + Py_ssize_t __pyx_v_ninter; + PyObject *__pyx_v_xs = NULL; + PyObject *__pyx_v_ys = NULL; + PyObject *__pyx_8genexpr1__pyx_v_x = NULL; + PyObject *__pyx_8genexpr2__pyx_v_x = NULL; + PyObject *__pyx_r = NULL; + __Pyx_RefNannyDeclarations + Py_ssize_t __pyx_t_1; + Py_ssize_t __pyx_t_2; + Py_ssize_t __pyx_t_3; + Py_ssize_t __pyx_t_4; + Py_ssize_t __pyx_t_5; + Py_ssize_t __pyx_t_6; + Py_ssize_t __pyx_t_7; + Py_ssize_t __pyx_t_8; + Py_ssize_t __pyx_t_9; + Py_ssize_t __pyx_t_10; + int __pyx_t_11; + Py_ssize_t __pyx_t_12; + Py_ssize_t __pyx_t_13; + int __pyx_t_14; + PyObject *__pyx_t_15 = NULL; + PyObject *__pyx_t_16 = NULL; + __Pyx_memviewslice __pyx_t_17 = { 0, 0, { 0 }, { 0 }, { 0 } }; + PyObject *__pyx_t_18 = NULL; + PyObject *__pyx_t_19 = NULL; + PyObject *__pyx_t_20 = NULL; + int __pyx_t_21; + Py_ssize_t __pyx_t_22; + PyObject *(*__pyx_t_23)(PyObject *); + PyObject *__pyx_t_24 = NULL; + PyObject *__pyx_t_25 = NULL; + PyObject *__pyx_t_26 = NULL; + float __pyx_t_27; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("box_intersection", 0); + + /* "box_intersection.pyx":178 + * """ + * + * cdef Py_ssize_t B = rect1.shape[0] # <<<<<<<<<<<<<< + * cdef Py_ssize_t K1 = rect1.shape[1] + * cdef Py_ssize_t K2 = rect2.shape[2] + */ + __pyx_v_B = (__pyx_v_rect1.shape[0]); + + /* "box_intersection.pyx":179 + * + * cdef Py_ssize_t B = rect1.shape[0] + * cdef Py_ssize_t K1 = rect1.shape[1] # <<<<<<<<<<<<<< + * cdef Py_ssize_t K2 = rect2.shape[2] + * + */ + __pyx_v_K1 = (__pyx_v_rect1.shape[1]); + + /* "box_intersection.pyx":180 + * cdef Py_ssize_t B = rect1.shape[0] + * cdef Py_ssize_t K1 = rect1.shape[1] + * cdef Py_ssize_t K2 = rect2.shape[2] # <<<<<<<<<<<<<< + * + * + */ + __pyx_v_K2 = (__pyx_v_rect2.shape[2]); + + /* "box_intersection.pyx":183 + * + * + * for b in range(B): # <<<<<<<<<<<<<< + * for k1 in range(K1): + * for k2 in range(K2): + */ + __pyx_t_1 = __pyx_v_B; + __pyx_t_2 = __pyx_t_1; + for (__pyx_t_3 = 0; __pyx_t_3 < __pyx_t_2; __pyx_t_3+=1) { + __pyx_v_b = __pyx_t_3; + + /* "box_intersection.pyx":184 + * + * for b in range(B): + * for k1 in range(K1): # <<<<<<<<<<<<<< + * for k2 in range(K2): + * if k2 >= nums_k2[b]: + */ + __pyx_t_4 = __pyx_v_K1; + __pyx_t_5 = __pyx_t_4; + for (__pyx_t_6 = 0; __pyx_t_6 < __pyx_t_5; __pyx_t_6+=1) { + __pyx_v_k1 = __pyx_t_6; + + /* "box_intersection.pyx":185 + * for b in range(B): + * for k1 in range(K1): + * for k2 in range(K2): # <<<<<<<<<<<<<< + * if k2 >= nums_k2[b]: + * break + 
*/ + __pyx_t_7 = __pyx_v_K2; + __pyx_t_8 = __pyx_t_7; + for (__pyx_t_9 = 0; __pyx_t_9 < __pyx_t_8; __pyx_t_9+=1) { + __pyx_v_k2 = __pyx_t_9; + + /* "box_intersection.pyx":186 + * for k1 in range(K1): + * for k2 in range(K2): + * if k2 >= nums_k2[b]: # <<<<<<<<<<<<<< + * break + * + */ + __pyx_t_10 = __pyx_v_b; + __pyx_t_11 = (__pyx_v_k2 >= (*((int *) ( /* dim=0 */ (__pyx_v_nums_k2.data + __pyx_t_10 * __pyx_v_nums_k2.strides[0]) )))); + if (__pyx_t_11) { + + /* "box_intersection.pyx":187 + * for k2 in range(K2): + * if k2 >= nums_k2[b]: + * break # <<<<<<<<<<<<<< + * + * if approximate and non_rot_inter_areas[b][k1][k2] == 0: + */ + goto __pyx_L8_break; + + /* "box_intersection.pyx":186 + * for k1 in range(K1): + * for k2 in range(K2): + * if k2 >= nums_k2[b]: # <<<<<<<<<<<<<< + * break + * + */ + } + + /* "box_intersection.pyx":189 + * break + * + * if approximate and non_rot_inter_areas[b][k1][k2] == 0: # <<<<<<<<<<<<<< + * continue + * + */ + if (__pyx_v_approximate) { + } else { + __pyx_t_11 = __pyx_v_approximate; + goto __pyx_L11_bool_binop_done; + } + __pyx_t_10 = __pyx_v_b; + __pyx_t_12 = __pyx_v_k1; + __pyx_t_13 = __pyx_v_k2; + __pyx_t_14 = ((*((float *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_non_rot_inter_areas.data + __pyx_t_10 * __pyx_v_non_rot_inter_areas.strides[0]) ) + __pyx_t_12 * __pyx_v_non_rot_inter_areas.strides[1]) ) + __pyx_t_13 * __pyx_v_non_rot_inter_areas.strides[2]) ))) == 0.0); + __pyx_t_11 = __pyx_t_14; + __pyx_L11_bool_binop_done:; + if (__pyx_t_11) { + + /* "box_intersection.pyx":190 + * + * if approximate and non_rot_inter_areas[b][k1][k2] == 0: + * continue # <<<<<<<<<<<<<< + * + * ##### compute volume of intersection + */ + goto __pyx_L7_continue; + + /* "box_intersection.pyx":189 + * break + * + * if approximate and non_rot_inter_areas[b][k1][k2] == 0: # <<<<<<<<<<<<<< + * continue + * + */ + } + + /* "box_intersection.pyx":193 + * + * ##### compute volume of intersection + * inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2]) # <<<<<<<<<<<<<< + * ninter = len(inter) + * if ninter > 0: # there is some intersection between the boxes + */ + __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_n_s_polygon_clip_unnest); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 193, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + __pyx_t_17.data = __pyx_v_rect1.data; + __pyx_t_17.memview = __pyx_v_rect1.memview; + __PYX_INC_MEMVIEW(&__pyx_t_17, 1); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_b; + Py_ssize_t __pyx_tmp_stride = __pyx_v_rect1.strides[0]; + __pyx_t_17.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +{ + Py_ssize_t __pyx_tmp_idx = __pyx_v_k1; + Py_ssize_t __pyx_tmp_stride = __pyx_v_rect1.strides[1]; + __pyx_t_17.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_17.shape[0] = __pyx_v_rect1.shape[2]; +__pyx_t_17.strides[0] = __pyx_v_rect1.strides[2]; + __pyx_t_17.suboffsets[0] = -1; + +__pyx_t_17.shape[1] = __pyx_v_rect1.shape[3]; +__pyx_t_17.strides[1] = __pyx_v_rect1.strides[3]; + __pyx_t_17.suboffsets[1] = -1; + +__pyx_t_18 = __pyx_memoryview_fromslice(__pyx_t_17, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 193, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_18); + __PYX_XCLEAR_MEMVIEW(&__pyx_t_17, 1); + __pyx_t_17.memview = NULL; __pyx_t_17.data = NULL; + __pyx_t_17.data = __pyx_v_rect2.data; + __pyx_t_17.memview = __pyx_v_rect2.memview; + __PYX_INC_MEMVIEW(&__pyx_t_17, 1); + { + Py_ssize_t __pyx_tmp_idx = __pyx_v_b; + Py_ssize_t __pyx_tmp_stride = 
__pyx_v_rect2.strides[0]; + __pyx_t_17.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +{ + Py_ssize_t __pyx_tmp_idx = __pyx_v_k2; + Py_ssize_t __pyx_tmp_stride = __pyx_v_rect2.strides[1]; + __pyx_t_17.data += __pyx_tmp_idx * __pyx_tmp_stride; +} + +__pyx_t_17.shape[0] = __pyx_v_rect2.shape[2]; +__pyx_t_17.strides[0] = __pyx_v_rect2.strides[2]; + __pyx_t_17.suboffsets[0] = -1; + +__pyx_t_17.shape[1] = __pyx_v_rect2.shape[3]; +__pyx_t_17.strides[1] = __pyx_v_rect2.strides[3]; + __pyx_t_17.suboffsets[1] = -1; + +__pyx_t_19 = __pyx_memoryview_fromslice(__pyx_t_17, 2, (PyObject *(*)(char *)) __pyx_memview_get_float, (int (*)(char *, PyObject *)) __pyx_memview_set_float, 0);; if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 193, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_19); + __PYX_XCLEAR_MEMVIEW(&__pyx_t_17, 1); + __pyx_t_17.memview = NULL; __pyx_t_17.data = NULL; + __pyx_t_20 = NULL; + __pyx_t_21 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_16))) { + __pyx_t_20 = PyMethod_GET_SELF(__pyx_t_16); + if (likely(__pyx_t_20)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_16); + __Pyx_INCREF(__pyx_t_20); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_16, function); + __pyx_t_21 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_20, __pyx_t_18, __pyx_t_19}; + __pyx_t_15 = __Pyx_PyObject_FastCall(__pyx_t_16, __pyx_callargs+1-__pyx_t_21, 2+__pyx_t_21); + __Pyx_XDECREF(__pyx_t_20); __pyx_t_20 = 0; + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 193, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + } + __Pyx_XDECREF_SET(__pyx_v_inter, __pyx_t_15); + __pyx_t_15 = 0; + + /* "box_intersection.pyx":194 + * ##### compute volume of intersection + * inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2]) + * ninter = len(inter) # <<<<<<<<<<<<<< + * if ninter > 0: # there is some intersection between the boxes + * xs = np.array([x[0] for x in inter]).astype(dtype=FLOAT) + */ + __pyx_t_22 = PyObject_Length(__pyx_v_inter); if (unlikely(__pyx_t_22 == ((Py_ssize_t)-1))) __PYX_ERR(0, 194, __pyx_L1_error) + __pyx_v_ninter = __pyx_t_22; + + /* "box_intersection.pyx":195 + * inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2]) + * ninter = len(inter) + * if ninter > 0: # there is some intersection between the boxes # <<<<<<<<<<<<<< + * xs = np.array([x[0] for x in inter]).astype(dtype=FLOAT) + * ys = np.array([x[1] for x in inter]).astype(dtype=FLOAT) + */ + __pyx_t_11 = (__pyx_v_ninter > 0); + if (__pyx_t_11) { + + /* "box_intersection.pyx":196 + * ninter = len(inter) + * if ninter > 0: # there is some intersection between the boxes + * xs = np.array([x[0] for x in inter]).astype(dtype=FLOAT) # <<<<<<<<<<<<<< + * ys = np.array([x[1] for x in inter]).astype(dtype=FLOAT) + * inter_areas[b,k1,k2] = 0.5 * np.abs(np.dot(xs,np.roll(ys,1))-np.dot(ys,np.roll(xs,1))) + */ + __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_n_s_np); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_n_s_array); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_19); + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + { /* enter inner scope */ + __pyx_t_16 = PyList_New(0); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 196, __pyx_L16_error) + __Pyx_GOTREF(__pyx_t_16); + if (likely(PyList_CheckExact(__pyx_v_inter)) || PyTuple_CheckExact(__pyx_v_inter)) { + __pyx_t_18 = 
__pyx_v_inter; __Pyx_INCREF(__pyx_t_18); __pyx_t_22 = 0; + __pyx_t_23 = NULL; + } else { + __pyx_t_22 = -1; __pyx_t_18 = PyObject_GetIter(__pyx_v_inter); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 196, __pyx_L16_error) + __Pyx_GOTREF(__pyx_t_18); + __pyx_t_23 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_18); if (unlikely(!__pyx_t_23)) __PYX_ERR(0, 196, __pyx_L16_error) + } + for (;;) { + if (likely(!__pyx_t_23)) { + if (likely(PyList_CheckExact(__pyx_t_18))) { + if (__pyx_t_22 >= PyList_GET_SIZE(__pyx_t_18)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_20 = PyList_GET_ITEM(__pyx_t_18, __pyx_t_22); __Pyx_INCREF(__pyx_t_20); __pyx_t_22++; if (unlikely((0 < 0))) __PYX_ERR(0, 196, __pyx_L16_error) + #else + __pyx_t_20 = PySequence_ITEM(__pyx_t_18, __pyx_t_22); __pyx_t_22++; if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 196, __pyx_L16_error) + __Pyx_GOTREF(__pyx_t_20); + #endif + } else { + if (__pyx_t_22 >= PyTuple_GET_SIZE(__pyx_t_18)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_20 = PyTuple_GET_ITEM(__pyx_t_18, __pyx_t_22); __Pyx_INCREF(__pyx_t_20); __pyx_t_22++; if (unlikely((0 < 0))) __PYX_ERR(0, 196, __pyx_L16_error) + #else + __pyx_t_20 = PySequence_ITEM(__pyx_t_18, __pyx_t_22); __pyx_t_22++; if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 196, __pyx_L16_error) + __Pyx_GOTREF(__pyx_t_20); + #endif + } + } else { + __pyx_t_20 = __pyx_t_23(__pyx_t_18); + if (unlikely(!__pyx_t_20)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(0, 196, __pyx_L16_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_20); + } + __Pyx_XDECREF_SET(__pyx_8genexpr1__pyx_v_x, __pyx_t_20); + __pyx_t_20 = 0; + __pyx_t_20 = __Pyx_GetItemInt(__pyx_8genexpr1__pyx_v_x, 0, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 196, __pyx_L16_error) + __Pyx_GOTREF(__pyx_t_20); + if (unlikely(__Pyx_ListComp_Append(__pyx_t_16, (PyObject*)__pyx_t_20))) __PYX_ERR(0, 196, __pyx_L16_error) + __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; + } + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + __Pyx_XDECREF(__pyx_8genexpr1__pyx_v_x); __pyx_8genexpr1__pyx_v_x = 0; + goto __pyx_L20_exit_scope; + __pyx_L16_error:; + __Pyx_XDECREF(__pyx_8genexpr1__pyx_v_x); __pyx_8genexpr1__pyx_v_x = 0; + goto __pyx_L1_error; + __pyx_L20_exit_scope:; + } /* exit inner scope */ + __pyx_t_18 = NULL; + __pyx_t_21 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_19))) { + __pyx_t_18 = PyMethod_GET_SELF(__pyx_t_19); + if (likely(__pyx_t_18)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_19); + __Pyx_INCREF(__pyx_t_18); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_19, function); + __pyx_t_21 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_18, __pyx_t_16}; + __pyx_t_15 = __Pyx_PyObject_FastCall(__pyx_t_19, __pyx_callargs+1-__pyx_t_21, 1+__pyx_t_21); + __Pyx_XDECREF(__pyx_t_18); __pyx_t_18 = 0; + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + } + __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_15, __pyx_n_s_astype); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_19); + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + __pyx_t_15 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + 
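The unrolled iteration above expands the list comprehension `xs = np.array([x[0] for x in inter]).astype(dtype=FLOAT)` from source line 196; together with the matching `ys` comprehension and the expression on line 198 it computes the clipped polygon's area by the shoelace formula, area = 0.5 * |sum_i (x_i * y_{i-1} - y_i * x_{i-1})|. A small NumPy sketch of that computation (the `shoelace_area` name is illustrative, not part of the module):

    import numpy as np

    def shoelace_area(poly):
        # Shoelace formula, matching box_intersection.pyx line 198:
        # 0.5 * |dot(xs, roll(ys, 1)) - dot(ys, roll(xs, 1))|
        xs = np.array([p[0] for p in poly], dtype=np.float32)
        ys = np.array([p[1] for p in poly], dtype=np.float32)
        return 0.5 * np.abs(np.dot(xs, np.roll(ys, 1)) - np.dot(ys, np.roll(xs, 1)))

    # A unit square has area 1, regardless of winding direction:
    assert abs(shoelace_area([(0, 0), (1, 0), (1, 1), (0, 1)]) - 1.0) < 1e-6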
__Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_n_s_FLOAT); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + if (PyDict_SetItem(__pyx_t_15, __pyx_n_s_dtype, __pyx_t_16) < 0) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + __pyx_t_16 = __Pyx_PyObject_Call(__pyx_t_19, __pyx_empty_tuple, __pyx_t_15); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 196, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + __Pyx_XDECREF_SET(__pyx_v_xs, __pyx_t_16); + __pyx_t_16 = 0; + + /* "box_intersection.pyx":197 + * if ninter > 0: # there is some intersection between the boxes + * xs = np.array([x[0] for x in inter]).astype(dtype=FLOAT) + * ys = np.array([x[1] for x in inter]).astype(dtype=FLOAT) # <<<<<<<<<<<<<< + * inter_areas[b,k1,k2] = 0.5 * np.abs(np.dot(xs,np.roll(ys,1))-np.dot(ys,np.roll(xs,1))) + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_15, __pyx_n_s_np); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 197, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_15, __pyx_n_s_array); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 197, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_19); + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + { /* enter inner scope */ + __pyx_t_15 = PyList_New(0); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 197, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_15); + if (likely(PyList_CheckExact(__pyx_v_inter)) || PyTuple_CheckExact(__pyx_v_inter)) { + __pyx_t_18 = __pyx_v_inter; __Pyx_INCREF(__pyx_t_18); __pyx_t_22 = 0; + __pyx_t_23 = NULL; + } else { + __pyx_t_22 = -1; __pyx_t_18 = PyObject_GetIter(__pyx_v_inter); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 197, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_18); + __pyx_t_23 = __Pyx_PyObject_GetIterNextFunc(__pyx_t_18); if (unlikely(!__pyx_t_23)) __PYX_ERR(0, 197, __pyx_L23_error) + } + for (;;) { + if (likely(!__pyx_t_23)) { + if (likely(PyList_CheckExact(__pyx_t_18))) { + if (__pyx_t_22 >= PyList_GET_SIZE(__pyx_t_18)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_20 = PyList_GET_ITEM(__pyx_t_18, __pyx_t_22); __Pyx_INCREF(__pyx_t_20); __pyx_t_22++; if (unlikely((0 < 0))) __PYX_ERR(0, 197, __pyx_L23_error) + #else + __pyx_t_20 = PySequence_ITEM(__pyx_t_18, __pyx_t_22); __pyx_t_22++; if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 197, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_20); + #endif + } else { + if (__pyx_t_22 >= PyTuple_GET_SIZE(__pyx_t_18)) break; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + __pyx_t_20 = PyTuple_GET_ITEM(__pyx_t_18, __pyx_t_22); __Pyx_INCREF(__pyx_t_20); __pyx_t_22++; if (unlikely((0 < 0))) __PYX_ERR(0, 197, __pyx_L23_error) + #else + __pyx_t_20 = PySequence_ITEM(__pyx_t_18, __pyx_t_22); __pyx_t_22++; if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 197, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_20); + #endif + } + } else { + __pyx_t_20 = __pyx_t_23(__pyx_t_18); + if (unlikely(!__pyx_t_20)) { + PyObject* exc_type = PyErr_Occurred(); + if (exc_type) { + if (likely(__Pyx_PyErr_GivenExceptionMatches(exc_type, PyExc_StopIteration))) PyErr_Clear(); + else __PYX_ERR(0, 197, __pyx_L23_error) + } + break; + } + __Pyx_GOTREF(__pyx_t_20); + } + __Pyx_XDECREF_SET(__pyx_8genexpr2__pyx_v_x, __pyx_t_20); + __pyx_t_20 = 0; + __pyx_t_20 = __Pyx_GetItemInt(__pyx_8genexpr2__pyx_v_x, 1, long, 1, __Pyx_PyInt_From_long, 0, 0, 0); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 197, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_20); + if 
(unlikely(__Pyx_ListComp_Append(__pyx_t_15, (PyObject*)__pyx_t_20))) __PYX_ERR(0, 197, __pyx_L23_error) + __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; + } + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_x); __pyx_8genexpr2__pyx_v_x = 0; + goto __pyx_L27_exit_scope; + __pyx_L23_error:; + __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_x); __pyx_8genexpr2__pyx_v_x = 0; + goto __pyx_L1_error; + __pyx_L27_exit_scope:; + } /* exit inner scope */ + __pyx_t_18 = NULL; + __pyx_t_21 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_19))) { + __pyx_t_18 = PyMethod_GET_SELF(__pyx_t_19); + if (likely(__pyx_t_18)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_19); + __Pyx_INCREF(__pyx_t_18); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_19, function); + __pyx_t_21 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_18, __pyx_t_15}; + __pyx_t_16 = __Pyx_PyObject_FastCall(__pyx_t_19, __pyx_callargs+1-__pyx_t_21, 1+__pyx_t_21); + __Pyx_XDECREF(__pyx_t_18); __pyx_t_18 = 0; + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 197, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + } + __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_n_s_astype); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 197, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_19); + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + __pyx_t_16 = __Pyx_PyDict_NewPresized(1); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 197, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + __Pyx_GetModuleGlobalName(__pyx_t_15, __pyx_n_s_FLOAT); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 197, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + if (PyDict_SetItem(__pyx_t_16, __pyx_n_s_dtype, __pyx_t_15) < 0) __PYX_ERR(0, 197, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + __pyx_t_15 = __Pyx_PyObject_Call(__pyx_t_19, __pyx_empty_tuple, __pyx_t_16); if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 197, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + __Pyx_XDECREF_SET(__pyx_v_ys, __pyx_t_15); + __pyx_t_15 = 0; + + /* "box_intersection.pyx":198 + * xs = np.array([x[0] for x in inter]).astype(dtype=FLOAT) + * ys = np.array([x[1] for x in inter]).astype(dtype=FLOAT) + * inter_areas[b,k1,k2] = 0.5 * np.abs(np.dot(xs,np.roll(ys,1))-np.dot(ys,np.roll(xs,1))) # <<<<<<<<<<<<<< + * + * + */ + __Pyx_GetModuleGlobalName(__pyx_t_16, __pyx_n_s_np); if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + __pyx_t_19 = __Pyx_PyObject_GetAttrStr(__pyx_t_16, __pyx_n_s_abs); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_19); + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + __Pyx_GetModuleGlobalName(__pyx_t_18, __pyx_n_s_np); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_18); + __pyx_t_20 = __Pyx_PyObject_GetAttrStr(__pyx_t_18, __pyx_n_s_dot); if (unlikely(!__pyx_t_20)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_20); + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + __Pyx_GetModuleGlobalName(__pyx_t_24, __pyx_n_s_np); if (unlikely(!__pyx_t_24)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_24); + __pyx_t_25 = __Pyx_PyObject_GetAttrStr(__pyx_t_24, __pyx_n_s_roll); if (unlikely(!__pyx_t_25)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_25); + __Pyx_DECREF(__pyx_t_24); __pyx_t_24 = 0; + __pyx_t_24 = NULL; + __pyx_t_21 = 0; + #if 
CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_25))) { + __pyx_t_24 = PyMethod_GET_SELF(__pyx_t_25); + if (likely(__pyx_t_24)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_25); + __Pyx_INCREF(__pyx_t_24); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_25, function); + __pyx_t_21 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_24, __pyx_v_ys, __pyx_int_1}; + __pyx_t_18 = __Pyx_PyObject_FastCall(__pyx_t_25, __pyx_callargs+1-__pyx_t_21, 2+__pyx_t_21); + __Pyx_XDECREF(__pyx_t_24); __pyx_t_24 = 0; + if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_18); + __Pyx_DECREF(__pyx_t_25); __pyx_t_25 = 0; + } + __pyx_t_25 = NULL; + __pyx_t_21 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_20))) { + __pyx_t_25 = PyMethod_GET_SELF(__pyx_t_20); + if (likely(__pyx_t_25)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_20); + __Pyx_INCREF(__pyx_t_25); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_20, function); + __pyx_t_21 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_25, __pyx_v_xs, __pyx_t_18}; + __pyx_t_16 = __Pyx_PyObject_FastCall(__pyx_t_20, __pyx_callargs+1-__pyx_t_21, 2+__pyx_t_21); + __Pyx_XDECREF(__pyx_t_25); __pyx_t_25 = 0; + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + if (unlikely(!__pyx_t_16)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_16); + __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; + } + __Pyx_GetModuleGlobalName(__pyx_t_18, __pyx_n_s_np); if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_18); + __pyx_t_25 = __Pyx_PyObject_GetAttrStr(__pyx_t_18, __pyx_n_s_dot); if (unlikely(!__pyx_t_25)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_25); + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + __Pyx_GetModuleGlobalName(__pyx_t_24, __pyx_n_s_np); if (unlikely(!__pyx_t_24)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_24); + __pyx_t_26 = __Pyx_PyObject_GetAttrStr(__pyx_t_24, __pyx_n_s_roll); if (unlikely(!__pyx_t_26)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_26); + __Pyx_DECREF(__pyx_t_24); __pyx_t_24 = 0; + __pyx_t_24 = NULL; + __pyx_t_21 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_26))) { + __pyx_t_24 = PyMethod_GET_SELF(__pyx_t_26); + if (likely(__pyx_t_24)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_26); + __Pyx_INCREF(__pyx_t_24); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_26, function); + __pyx_t_21 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_24, __pyx_v_xs, __pyx_int_1}; + __pyx_t_18 = __Pyx_PyObject_FastCall(__pyx_t_26, __pyx_callargs+1-__pyx_t_21, 2+__pyx_t_21); + __Pyx_XDECREF(__pyx_t_24); __pyx_t_24 = 0; + if (unlikely(!__pyx_t_18)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_18); + __Pyx_DECREF(__pyx_t_26); __pyx_t_26 = 0; + } + __pyx_t_26 = NULL; + __pyx_t_21 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_25))) { + __pyx_t_26 = PyMethod_GET_SELF(__pyx_t_25); + if (likely(__pyx_t_26)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_25); + __Pyx_INCREF(__pyx_t_26); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_25, function); + __pyx_t_21 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[3] = {__pyx_t_26, __pyx_v_ys, __pyx_t_18}; + __pyx_t_20 = __Pyx_PyObject_FastCall(__pyx_t_25, __pyx_callargs+1-__pyx_t_21, 2+__pyx_t_21); + __Pyx_XDECREF(__pyx_t_26); __pyx_t_26 = 0; + __Pyx_DECREF(__pyx_t_18); __pyx_t_18 = 0; + if (unlikely(!__pyx_t_20)) 
__PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_20); + __Pyx_DECREF(__pyx_t_25); __pyx_t_25 = 0; + } + __pyx_t_25 = PyNumber_Subtract(__pyx_t_16, __pyx_t_20); if (unlikely(!__pyx_t_25)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_25); + __Pyx_DECREF(__pyx_t_16); __pyx_t_16 = 0; + __Pyx_DECREF(__pyx_t_20); __pyx_t_20 = 0; + __pyx_t_20 = NULL; + __pyx_t_21 = 0; + #if CYTHON_UNPACK_METHODS + if (unlikely(PyMethod_Check(__pyx_t_19))) { + __pyx_t_20 = PyMethod_GET_SELF(__pyx_t_19); + if (likely(__pyx_t_20)) { + PyObject* function = PyMethod_GET_FUNCTION(__pyx_t_19); + __Pyx_INCREF(__pyx_t_20); + __Pyx_INCREF(function); + __Pyx_DECREF_SET(__pyx_t_19, function); + __pyx_t_21 = 1; + } + } + #endif + { + PyObject *__pyx_callargs[2] = {__pyx_t_20, __pyx_t_25}; + __pyx_t_15 = __Pyx_PyObject_FastCall(__pyx_t_19, __pyx_callargs+1-__pyx_t_21, 1+__pyx_t_21); + __Pyx_XDECREF(__pyx_t_20); __pyx_t_20 = 0; + __Pyx_DECREF(__pyx_t_25); __pyx_t_25 = 0; + if (unlikely(!__pyx_t_15)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_15); + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + } + __pyx_t_19 = PyNumber_Multiply(__pyx_float_0_5, __pyx_t_15); if (unlikely(!__pyx_t_19)) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_19); + __Pyx_DECREF(__pyx_t_15); __pyx_t_15 = 0; + __pyx_t_27 = __pyx_PyFloat_AsFloat(__pyx_t_19); if (unlikely((__pyx_t_27 == (float)-1) && PyErr_Occurred())) __PYX_ERR(0, 198, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_19); __pyx_t_19 = 0; + __pyx_t_13 = __pyx_v_b; + __pyx_t_12 = __pyx_v_k1; + __pyx_t_10 = __pyx_v_k2; + *((float *) ( /* dim=2 */ (( /* dim=1 */ (( /* dim=0 */ (__pyx_v_inter_areas.data + __pyx_t_13 * __pyx_v_inter_areas.strides[0]) ) + __pyx_t_12 * __pyx_v_inter_areas.strides[1]) ) + __pyx_t_10 * __pyx_v_inter_areas.strides[2]) )) = __pyx_t_27; + + /* "box_intersection.pyx":195 + * inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2]) + * ninter = len(inter) + * if ninter > 0: # there is some intersection between the boxes # <<<<<<<<<<<<<< + * xs = np.array([x[0] for x in inter]).astype(dtype=FLOAT) + * ys = np.array([x[1] for x in inter]).astype(dtype=FLOAT) + */ + } + __pyx_L7_continue:; + } + __pyx_L8_break:; + } + } + + /* "box_intersection.pyx":164 + * + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def box_intersection(float [:, :, :, :] rect1, + */ + + /* function exit code */ + __pyx_r = Py_None; __Pyx_INCREF(Py_None); + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_15); + __Pyx_XDECREF(__pyx_t_16); + __PYX_XCLEAR_MEMVIEW(&__pyx_t_17, 1); + __Pyx_XDECREF(__pyx_t_18); + __Pyx_XDECREF(__pyx_t_19); + __Pyx_XDECREF(__pyx_t_20); + __Pyx_XDECREF(__pyx_t_24); + __Pyx_XDECREF(__pyx_t_25); + __Pyx_XDECREF(__pyx_t_26); + __Pyx_AddTraceback("box_intersection.box_intersection", __pyx_clineno, __pyx_lineno, __pyx_filename); + __pyx_r = NULL; + __pyx_L0:; + __Pyx_XDECREF(__pyx_v_inter); + __Pyx_XDECREF(__pyx_v_xs); + __Pyx_XDECREF(__pyx_v_ys); + __Pyx_XDECREF(__pyx_8genexpr1__pyx_v_x); + __Pyx_XDECREF(__pyx_8genexpr2__pyx_v_x); + __Pyx_XGIVEREF(__pyx_r); + __Pyx_RefNannyFinishContext(); + return __pyx_r; +} +static struct __pyx_vtabstruct_array __pyx_vtable_array; + +static PyObject *__pyx_tp_new_array(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_array_obj *p; + PyObject *o; + #if CYTHON_COMPILING_IN_LIMITED_API + allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); + o = alloc_func(t, 0); + #else + if (likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) { + o = 
(*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + #endif + p = ((struct __pyx_array_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_array; + p->mode = ((PyObject*)Py_None); Py_INCREF(Py_None); + p->_format = ((PyObject*)Py_None); Py_INCREF(Py_None); + if (unlikely(__pyx_array___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_array(PyObject *o) { + struct __pyx_array_obj *p = (struct __pyx_array_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely((PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)) && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && (!PyType_IS_GC(Py_TYPE(o)) || !__Pyx_PyObject_GC_IsFinalized(o))) { + if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_array) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + } + #endif + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_array___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->mode); + Py_CLEAR(p->_format); + #if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY + (*Py_TYPE(o)->tp_free)(o); + #else + { + freefunc tp_free = (freefunc)PyType_GetSlot(Py_TYPE(o), Py_tp_free); + if (tp_free) tp_free(o); + } + #endif +} +static PyObject *__pyx_sq_item_array(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_array(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_array___setitem__(o, i, v); + } + else { + __Pyx_TypeName o_type_name; + o_type_name = __Pyx_PyType_GetName(Py_TYPE(o)); + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name); + __Pyx_DECREF_TypeName(o_type_name); + return -1; + } +} + +static PyObject *__pyx_tp_getattro_array(PyObject *o, PyObject *n) { + PyObject *v = __Pyx_PyObject_GenericGetAttr(o, n); + if (!v && PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + v = __pyx_array___getattr__(o, n); + } + return v; +} + +static PyObject *__pyx_getprop___pyx_array_memview(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_5array_7memview_1__get__(o); +} + +static PyMethodDef __pyx_methods_array[] = { + {"__getattr__", (PyCFunction)__pyx_array___getattr__, METH_O|METH_COEXIST, 0}, + {"__reduce_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_array_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {"__setstate_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_array_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_array[] = { + {(char *)"memview", __pyx_getprop___pyx_array_memview, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; +#if CYTHON_USE_TYPE_SPECS +#if !CYTHON_COMPILING_IN_LIMITED_API + +static PyBufferProcs __pyx_tp_as_buffer_array = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_array_getbuffer, /*bf_getbuffer*/ 
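The type tables emitted from here on (`array`, `Enum`, `memoryview`, `_memoryviewslice`) are Cython's standard memoryview scaffolding; the `bf_getbuffer` slots plug these classes into Python's buffer protocol, which is what lets callers pass NumPy arrays for the typed `float [:, :, :, :]` parameters. A usage sketch of the public entry point, assuming the compiled extension is importable as `box_intersection` and that the `int [:]` parameter corresponds to `np.int32` on the target platform; all arrays must be writable, since the wrapper converts them with `PyBUF_WRITABLE`:

    import numpy as np
    import box_intersection  # the compiled extension generated in this file

    B, K1, K2 = 2, 4, 5
    rect1 = np.zeros((B, K1, 8, 3), dtype=np.float32)  # box corners, B x K1 x 8 x 3
    rect2 = np.zeros((B, K2, 8, 3), dtype=np.float32)  # box corners, B x K2 x 8 x 3
    non_rot = np.ones((B, K1, K2), dtype=np.float32)   # axis-aligned prefilter areas
    nums_k2 = np.full((B,), K2, dtype=np.int32)        # valid rect2 count per batch
    inter = np.zeros((B, K1, K2), dtype=np.float32)    # filled in place

    # With approximate=True, pairs whose prefilter area is 0 are skipped entirely.
    box_intersection.box_intersection(rect1, rect2, non_rot, nums_k2, inter, False)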
+ 0, /*bf_releasebuffer*/ +}; +#endif +static PyType_Slot __pyx_type___pyx_array_slots[] = { + {Py_tp_dealloc, (void *)__pyx_tp_dealloc_array}, + {Py_sq_length, (void *)__pyx_array___len__}, + {Py_sq_item, (void *)__pyx_sq_item_array}, + {Py_mp_length, (void *)__pyx_array___len__}, + {Py_mp_subscript, (void *)__pyx_array___getitem__}, + {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_array}, + {Py_tp_getattro, (void *)__pyx_tp_getattro_array}, + #if defined(Py_bf_getbuffer) + {Py_bf_getbuffer, (void *)__pyx_array_getbuffer}, + #endif + {Py_tp_methods, (void *)__pyx_methods_array}, + {Py_tp_getset, (void *)__pyx_getsets_array}, + {Py_tp_new, (void *)__pyx_tp_new_array}, + {0, 0}, +}; +static PyType_Spec __pyx_type___pyx_array_spec = { + "box_intersection.array", + sizeof(struct __pyx_array_obj), + 0, + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_SEQUENCE, + __pyx_type___pyx_array_slots, +}; +#else + +static PySequenceMethods __pyx_tp_as_sequence_array = { + __pyx_array___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_array, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_array = { + __pyx_array___len__, /*mp_length*/ + __pyx_array___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_array, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_array = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_array_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_array = { + PyVarObject_HEAD_INIT(0, 0) + "box_intersection.""array", /*tp_name*/ + sizeof(struct __pyx_array_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_array, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + 0, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_array, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_array, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + __pyx_tp_getattro_array, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_array, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_SEQUENCE, /*tp_flags*/ + 0, /*tp_doc*/ + 0, /*tp_traverse*/ + 0, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_array, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_array, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + #if !CYTHON_USE_TYPE_SPECS + 0, /*tp_dictoffset*/ + #endif + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_array, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + #if CYTHON_USE_TP_FINALIZE + 0, /*tp_finalize*/ + #else 
+ NULL, /*tp_finalize*/ + #endif + #endif + #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ + #endif + #if __PYX_NEED_TP_PRINT_SLOT == 1 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ + #endif + #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ + #endif +}; +#endif + +static PyObject *__pyx_tp_new_Enum(PyTypeObject *t, CYTHON_UNUSED PyObject *a, CYTHON_UNUSED PyObject *k) { + struct __pyx_MemviewEnum_obj *p; + PyObject *o; + #if CYTHON_COMPILING_IN_LIMITED_API + allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); + o = alloc_func(t, 0); + #else + if (likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + #endif + p = ((struct __pyx_MemviewEnum_obj *)o); + p->name = Py_None; Py_INCREF(Py_None); + return o; +} + +static void __pyx_tp_dealloc_Enum(PyObject *o) { + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely((PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)) && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) { + if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_Enum) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + } + #endif + PyObject_GC_UnTrack(o); + Py_CLEAR(p->name); + #if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY + (*Py_TYPE(o)->tp_free)(o); + #else + { + freefunc tp_free = (freefunc)PyType_GetSlot(Py_TYPE(o), Py_tp_free); + if (tp_free) tp_free(o); + } + #endif +} + +static int __pyx_tp_traverse_Enum(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + if (p->name) { + e = (*v)(p->name, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_Enum(PyObject *o) { + PyObject* tmp; + struct __pyx_MemviewEnum_obj *p = (struct __pyx_MemviewEnum_obj *)o; + tmp = ((PyObject*)p->name); + p->name = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + return 0; +} + +static PyObject *__pyx_specialmethod___pyx_MemviewEnum___repr__(PyObject *self, CYTHON_UNUSED PyObject *arg) { + return __pyx_MemviewEnum___repr__(self); +} + +static PyMethodDef __pyx_methods_Enum[] = { + {"__repr__", (PyCFunction)__pyx_specialmethod___pyx_MemviewEnum___repr__, METH_NOARGS|METH_COEXIST, 0}, + {"__reduce_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_MemviewEnum_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {"__setstate_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_MemviewEnum_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {0, 0, 0, 0} +}; +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_type___pyx_MemviewEnum_slots[] = { + {Py_tp_dealloc, (void *)__pyx_tp_dealloc_Enum}, + {Py_tp_repr, (void *)__pyx_MemviewEnum___repr__}, + {Py_tp_traverse, (void *)__pyx_tp_traverse_Enum}, + {Py_tp_clear, (void *)__pyx_tp_clear_Enum}, + {Py_tp_methods, (void *)__pyx_methods_Enum}, + {Py_tp_init, (void *)__pyx_MemviewEnum___init__}, + {Py_tp_new, (void *)__pyx_tp_new_Enum}, + {0, 0}, +}; +static PyType_Spec __pyx_type___pyx_MemviewEnum_spec = { + "box_intersection.Enum", + sizeof(struct __pyx_MemviewEnum_obj), + 0, + 
Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, + __pyx_type___pyx_MemviewEnum_slots, +}; +#else + +static PyTypeObject __pyx_type___pyx_MemviewEnum = { + PyVarObject_HEAD_INIT(0, 0) + "box_intersection.""Enum", /*tp_name*/ + sizeof(struct __pyx_MemviewEnum_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_Enum, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_MemviewEnum___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + 0, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_Enum, /*tp_traverse*/ + __pyx_tp_clear_Enum, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_Enum, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + #if !CYTHON_USE_TYPE_SPECS + 0, /*tp_dictoffset*/ + #endif + __pyx_MemviewEnum___init__, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_Enum, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + #if CYTHON_USE_TP_FINALIZE + 0, /*tp_finalize*/ + #else + NULL, /*tp_finalize*/ + #endif + #endif + #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ + #endif + #if __PYX_NEED_TP_PRINT_SLOT == 1 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ + #endif + #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ + #endif +}; +#endif +static struct __pyx_vtabstruct_memoryview __pyx_vtable_memoryview; + +static PyObject *__pyx_tp_new_memoryview(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryview_obj *p; + PyObject *o; + #if CYTHON_COMPILING_IN_LIMITED_API + allocfunc alloc_func = (allocfunc)PyType_GetSlot(t, Py_tp_alloc); + o = alloc_func(t, 0); + #else + if (likely(!__Pyx_PyType_HasFeature(t, Py_TPFLAGS_IS_ABSTRACT))) { + o = (*t->tp_alloc)(t, 0); + } else { + o = (PyObject *) PyBaseObject_Type.tp_new(t, __pyx_empty_tuple, 0); + } + if (unlikely(!o)) return 0; + #endif + p = ((struct __pyx_memoryview_obj *)o); + p->__pyx_vtab = __pyx_vtabptr_memoryview; + p->obj = Py_None; Py_INCREF(Py_None); + p->_size = Py_None; Py_INCREF(Py_None); + p->_array_interface = Py_None; Py_INCREF(Py_None); + p->view.obj = NULL; + if (unlikely(__pyx_memoryview___cinit__(o, a, k) < 0)) goto bad; + return o; + bad: + Py_DECREF(o); o = 0; + return NULL; +} + +static void __pyx_tp_dealloc_memoryview(PyObject *o) { + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely((PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)) && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && 
!__Pyx_PyObject_GC_IsFinalized(o)) { + if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc_memoryview) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryview___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->obj); + Py_CLEAR(p->_size); + Py_CLEAR(p->_array_interface); + #if CYTHON_USE_TYPE_SLOTS || CYTHON_COMPILING_IN_PYPY + (*Py_TYPE(o)->tp_free)(o); + #else + { + freefunc tp_free = (freefunc)PyType_GetSlot(Py_TYPE(o), Py_tp_free); + if (tp_free) tp_free(o); + } + #endif +} + +static int __pyx_tp_traverse_memoryview(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + if (p->obj) { + e = (*v)(p->obj, a); if (e) return e; + } + if (p->_size) { + e = (*v)(p->_size, a); if (e) return e; + } + if (p->_array_interface) { + e = (*v)(p->_array_interface, a); if (e) return e; + } + if (p->view.obj) { + e = (*v)(p->view.obj, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear_memoryview(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryview_obj *p = (struct __pyx_memoryview_obj *)o; + tmp = ((PyObject*)p->obj); + p->obj = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_size); + p->_size = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + tmp = ((PyObject*)p->_array_interface); + p->_array_interface = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + Py_CLEAR(p->view.obj); + return 0; +} +static PyObject *__pyx_sq_item_memoryview(PyObject *o, Py_ssize_t i) { + PyObject *r; + PyObject *x = PyInt_FromSsize_t(i); if(!x) return 0; + r = Py_TYPE(o)->tp_as_mapping->mp_subscript(o, x); + Py_DECREF(x); + return r; +} + +static int __pyx_mp_ass_subscript_memoryview(PyObject *o, PyObject *i, PyObject *v) { + if (v) { + return __pyx_memoryview___setitem__(o, i, v); + } + else { + __Pyx_TypeName o_type_name; + o_type_name = __Pyx_PyType_GetName(Py_TYPE(o)); + PyErr_Format(PyExc_NotImplementedError, + "Subscript deletion not supported by " __Pyx_FMT_TYPENAME, o_type_name); + __Pyx_DECREF_TypeName(o_type_name); + return -1; + } +} + +static PyObject *__pyx_getprop___pyx_memoryview_T(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_1T_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_base(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4base_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_shape(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_5shape_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_strides(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_7strides_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_suboffsets(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_10suboffsets_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_ndim(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4ndim_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_itemsize(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_8itemsize_1__get__(o); +} + +static PyObject 
*__pyx_getprop___pyx_memoryview_nbytes(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_6nbytes_1__get__(o); +} + +static PyObject *__pyx_getprop___pyx_memoryview_size(PyObject *o, CYTHON_UNUSED void *x) { + return __pyx_pw_15View_dot_MemoryView_10memoryview_4size_1__get__(o); +} + +static PyObject *__pyx_specialmethod___pyx_memoryview___repr__(PyObject *self, CYTHON_UNUSED PyObject *arg) { + return __pyx_memoryview___repr__(self); +} + +static PyMethodDef __pyx_methods_memoryview[] = { + {"__repr__", (PyCFunction)__pyx_specialmethod___pyx_memoryview___repr__, METH_NOARGS|METH_COEXIST, 0}, + {"is_c_contig", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_is_c_contig, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {"is_f_contig", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_is_f_contig, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {"copy", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_copy, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {"copy_fortran", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_memoryview_copy_fortran, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {"__reduce_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryview_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {"__setstate_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryview_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {0, 0, 0, 0} +}; + +static struct PyGetSetDef __pyx_getsets_memoryview[] = { + {(char *)"T", __pyx_getprop___pyx_memoryview_T, 0, (char *)0, 0}, + {(char *)"base", __pyx_getprop___pyx_memoryview_base, 0, (char *)0, 0}, + {(char *)"shape", __pyx_getprop___pyx_memoryview_shape, 0, (char *)0, 0}, + {(char *)"strides", __pyx_getprop___pyx_memoryview_strides, 0, (char *)0, 0}, + {(char *)"suboffsets", __pyx_getprop___pyx_memoryview_suboffsets, 0, (char *)0, 0}, + {(char *)"ndim", __pyx_getprop___pyx_memoryview_ndim, 0, (char *)0, 0}, + {(char *)"itemsize", __pyx_getprop___pyx_memoryview_itemsize, 0, (char *)0, 0}, + {(char *)"nbytes", __pyx_getprop___pyx_memoryview_nbytes, 0, (char *)0, 0}, + {(char *)"size", __pyx_getprop___pyx_memoryview_size, 0, (char *)0, 0}, + {0, 0, 0, 0, 0} +}; +#if CYTHON_USE_TYPE_SPECS +#if !CYTHON_COMPILING_IN_LIMITED_API + +static PyBufferProcs __pyx_tp_as_buffer_memoryview = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_memoryview_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; +#endif +static PyType_Slot __pyx_type___pyx_memoryview_slots[] = { + {Py_tp_dealloc, (void *)__pyx_tp_dealloc_memoryview}, + {Py_tp_repr, (void *)__pyx_memoryview___repr__}, + {Py_sq_length, (void *)__pyx_memoryview___len__}, + {Py_sq_item, (void *)__pyx_sq_item_memoryview}, + {Py_mp_length, (void *)__pyx_memoryview___len__}, + {Py_mp_subscript, (void *)__pyx_memoryview___getitem__}, + {Py_mp_ass_subscript, (void *)__pyx_mp_ass_subscript_memoryview}, + {Py_tp_str, (void *)__pyx_memoryview___str__}, + #if defined(Py_bf_getbuffer) + {Py_bf_getbuffer, (void *)__pyx_memoryview_getbuffer}, + #endif + {Py_tp_traverse, (void *)__pyx_tp_traverse_memoryview}, + {Py_tp_clear, (void *)__pyx_tp_clear_memoryview}, + {Py_tp_methods, (void 
*)__pyx_methods_memoryview}, + {Py_tp_getset, (void *)__pyx_getsets_memoryview}, + {Py_tp_new, (void *)__pyx_tp_new_memoryview}, + {0, 0}, +}; +static PyType_Spec __pyx_type___pyx_memoryview_spec = { + "box_intersection.memoryview", + sizeof(struct __pyx_memoryview_obj), + 0, + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, + __pyx_type___pyx_memoryview_slots, +}; +#else + +static PySequenceMethods __pyx_tp_as_sequence_memoryview = { + __pyx_memoryview___len__, /*sq_length*/ + 0, /*sq_concat*/ + 0, /*sq_repeat*/ + __pyx_sq_item_memoryview, /*sq_item*/ + 0, /*sq_slice*/ + 0, /*sq_ass_item*/ + 0, /*sq_ass_slice*/ + 0, /*sq_contains*/ + 0, /*sq_inplace_concat*/ + 0, /*sq_inplace_repeat*/ +}; + +static PyMappingMethods __pyx_tp_as_mapping_memoryview = { + __pyx_memoryview___len__, /*mp_length*/ + __pyx_memoryview___getitem__, /*mp_subscript*/ + __pyx_mp_ass_subscript_memoryview, /*mp_ass_subscript*/ +}; + +static PyBufferProcs __pyx_tp_as_buffer_memoryview = { + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getreadbuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getwritebuffer*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getsegcount*/ + #endif + #if PY_MAJOR_VERSION < 3 + 0, /*bf_getcharbuffer*/ + #endif + __pyx_memoryview_getbuffer, /*bf_getbuffer*/ + 0, /*bf_releasebuffer*/ +}; + +static PyTypeObject __pyx_type___pyx_memoryview = { + PyVarObject_HEAD_INIT(0, 0) + "box_intersection.""memoryview", /*tp_name*/ + sizeof(struct __pyx_memoryview_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc_memoryview, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + __pyx_memoryview___repr__, /*tp_repr*/ + 0, /*tp_as_number*/ + &__pyx_tp_as_sequence_memoryview, /*tp_as_sequence*/ + &__pyx_tp_as_mapping_memoryview, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + __pyx_memoryview___str__, /*tp_str*/ + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + &__pyx_tp_as_buffer_memoryview, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC, /*tp_flags*/ + 0, /*tp_doc*/ + __pyx_tp_traverse_memoryview, /*tp_traverse*/ + __pyx_tp_clear_memoryview, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods_memoryview, /*tp_methods*/ + 0, /*tp_members*/ + __pyx_getsets_memoryview, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + #if !CYTHON_USE_TYPE_SPECS + 0, /*tp_dictoffset*/ + #endif + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new_memoryview, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + #if CYTHON_USE_TP_FINALIZE + 0, /*tp_finalize*/ + #else + NULL, /*tp_finalize*/ + #endif + #endif + #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ + #endif + #if __PYX_NEED_TP_PRINT_SLOT == 1 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ + #endif + #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 
&& PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ + #endif +}; +#endif +static struct __pyx_vtabstruct__memoryviewslice __pyx_vtable__memoryviewslice; + +static PyObject *__pyx_tp_new__memoryviewslice(PyTypeObject *t, PyObject *a, PyObject *k) { + struct __pyx_memoryviewslice_obj *p; + PyObject *o = __pyx_tp_new_memoryview(t, a, k); + if (unlikely(!o)) return 0; + p = ((struct __pyx_memoryviewslice_obj *)o); + p->__pyx_base.__pyx_vtab = (struct __pyx_vtabstruct_memoryview*)__pyx_vtabptr__memoryviewslice; + p->from_object = Py_None; Py_INCREF(Py_None); + p->from_slice.memview = NULL; + return o; +} + +static void __pyx_tp_dealloc__memoryviewslice(PyObject *o) { + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + #if CYTHON_USE_TP_FINALIZE + if (unlikely((PY_VERSION_HEX >= 0x03080000 || __Pyx_PyType_HasFeature(Py_TYPE(o), Py_TPFLAGS_HAVE_FINALIZE)) && __Pyx_PyObject_GetSlot(o, tp_finalize, destructor)) && !__Pyx_PyObject_GC_IsFinalized(o)) { + if (__Pyx_PyObject_GetSlot(o, tp_dealloc, destructor) == __pyx_tp_dealloc__memoryviewslice) { + if (PyObject_CallFinalizerFromDealloc(o)) return; + } + } + #endif + PyObject_GC_UnTrack(o); + { + PyObject *etype, *eval, *etb; + PyErr_Fetch(&etype, &eval, &etb); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) + 1); + __pyx_memoryviewslice___dealloc__(o); + __Pyx_SET_REFCNT(o, Py_REFCNT(o) - 1); + PyErr_Restore(etype, eval, etb); + } + Py_CLEAR(p->from_object); + PyObject_GC_Track(o); + __pyx_tp_dealloc_memoryview(o); +} + +static int __pyx_tp_traverse__memoryviewslice(PyObject *o, visitproc v, void *a) { + int e; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + e = __pyx_tp_traverse_memoryview(o, v, a); if (e) return e; + if (p->from_object) { + e = (*v)(p->from_object, a); if (e) return e; + } + return 0; +} + +static int __pyx_tp_clear__memoryviewslice(PyObject *o) { + PyObject* tmp; + struct __pyx_memoryviewslice_obj *p = (struct __pyx_memoryviewslice_obj *)o; + __pyx_tp_clear_memoryview(o); + tmp = ((PyObject*)p->from_object); + p->from_object = Py_None; Py_INCREF(Py_None); + Py_XDECREF(tmp); + __PYX_XCLEAR_MEMVIEW(&p->from_slice, 1); + return 0; +} + +static PyMethodDef __pyx_methods__memoryviewslice[] = { + {"__reduce_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryviewslice_1__reduce_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {"__setstate_cython__", (PyCFunction)(void*)(__Pyx_PyCFunction_FastCallWithKeywords)__pyx_pw___pyx_memoryviewslice_3__setstate_cython__, __Pyx_METH_FASTCALL|METH_KEYWORDS, 0}, + {0, 0, 0, 0} +}; +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_type___pyx_memoryviewslice_slots[] = { + {Py_tp_dealloc, (void *)__pyx_tp_dealloc__memoryviewslice}, + {Py_tp_doc, (void *)PyDoc_STR("Internal class for passing memoryview slices to Python")}, + {Py_tp_traverse, (void *)__pyx_tp_traverse__memoryviewslice}, + {Py_tp_clear, (void *)__pyx_tp_clear__memoryviewslice}, + {Py_tp_methods, (void *)__pyx_methods__memoryviewslice}, + {Py_tp_new, (void *)__pyx_tp_new__memoryviewslice}, + {0, 0}, +}; +static PyType_Spec __pyx_type___pyx_memoryviewslice_spec = { + "box_intersection._memoryviewslice", + sizeof(struct __pyx_memoryviewslice_obj), + 0, + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE, + __pyx_type___pyx_memoryviewslice_slots, +}; +#else + +static PyTypeObject __pyx_type___pyx_memoryviewslice = { + PyVarObject_HEAD_INIT(0, 0) + 
"box_intersection.""_memoryviewslice", /*tp_name*/ + sizeof(struct __pyx_memoryviewslice_obj), /*tp_basicsize*/ + 0, /*tp_itemsize*/ + __pyx_tp_dealloc__memoryviewslice, /*tp_dealloc*/ + #if PY_VERSION_HEX < 0x030800b4 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030800b4 + 0, /*tp_vectorcall_offset*/ + #endif + 0, /*tp_getattr*/ + 0, /*tp_setattr*/ + #if PY_MAJOR_VERSION < 3 + 0, /*tp_compare*/ + #endif + #if PY_MAJOR_VERSION >= 3 + 0, /*tp_as_async*/ + #endif + #if CYTHON_COMPILING_IN_PYPY || 0 + __pyx_memoryview___repr__, /*tp_repr*/ + #else + 0, /*tp_repr*/ + #endif + 0, /*tp_as_number*/ + 0, /*tp_as_sequence*/ + 0, /*tp_as_mapping*/ + 0, /*tp_hash*/ + 0, /*tp_call*/ + #if CYTHON_COMPILING_IN_PYPY || 0 + __pyx_memoryview___str__, /*tp_str*/ + #else + 0, /*tp_str*/ + #endif + 0, /*tp_getattro*/ + 0, /*tp_setattro*/ + 0, /*tp_as_buffer*/ + Py_TPFLAGS_DEFAULT|Py_TPFLAGS_HAVE_VERSION_TAG|Py_TPFLAGS_CHECKTYPES|Py_TPFLAGS_HAVE_NEWBUFFER|Py_TPFLAGS_BASETYPE|Py_TPFLAGS_HAVE_GC|Py_TPFLAGS_SEQUENCE, /*tp_flags*/ + PyDoc_STR("Internal class for passing memoryview slices to Python"), /*tp_doc*/ + __pyx_tp_traverse__memoryviewslice, /*tp_traverse*/ + __pyx_tp_clear__memoryviewslice, /*tp_clear*/ + 0, /*tp_richcompare*/ + 0, /*tp_weaklistoffset*/ + 0, /*tp_iter*/ + 0, /*tp_iternext*/ + __pyx_methods__memoryviewslice, /*tp_methods*/ + 0, /*tp_members*/ + 0, /*tp_getset*/ + 0, /*tp_base*/ + 0, /*tp_dict*/ + 0, /*tp_descr_get*/ + 0, /*tp_descr_set*/ + #if !CYTHON_USE_TYPE_SPECS + 0, /*tp_dictoffset*/ + #endif + 0, /*tp_init*/ + 0, /*tp_alloc*/ + __pyx_tp_new__memoryviewslice, /*tp_new*/ + 0, /*tp_free*/ + 0, /*tp_is_gc*/ + 0, /*tp_bases*/ + 0, /*tp_mro*/ + 0, /*tp_cache*/ + 0, /*tp_subclasses*/ + 0, /*tp_weaklist*/ + 0, /*tp_del*/ + 0, /*tp_version_tag*/ + #if PY_VERSION_HEX >= 0x030400a1 + #if CYTHON_USE_TP_FINALIZE + 0, /*tp_finalize*/ + #else + NULL, /*tp_finalize*/ + #endif + #endif + #if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, /*tp_vectorcall*/ + #endif + #if __PYX_NEED_TP_PRINT_SLOT == 1 + 0, /*tp_print*/ + #endif + #if PY_VERSION_HEX >= 0x030C0000 + 0, /*tp_watched*/ + #endif + #if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, /*tp_pypy_flags*/ + #endif +}; +#endif + +static PyMethodDef __pyx_methods[] = { + {0, 0, 0, 0} +}; +#ifndef CYTHON_SMALL_CODE +#if defined(__clang__) + #define CYTHON_SMALL_CODE +#elif defined(__GNUC__) && (__GNUC__ > 4 || (__GNUC__ == 4 && __GNUC_MINOR__ >= 3)) + #define CYTHON_SMALL_CODE __attribute__((cold)) +#else + #define CYTHON_SMALL_CODE +#endif +#endif +/* #### Code section: pystring_table ### */ + +static int __Pyx_CreateStringTabAndInitStrings(void) { + __Pyx_StringTabEntry __pyx_string_tab[] = { + {&__pyx_kp_u_, __pyx_k_, sizeof(__pyx_k_), 0, 1, 0, 0}, + {&__pyx_n_s_ASCII, __pyx_k_ASCII, sizeof(__pyx_k_ASCII), 0, 0, 1, 1}, + {&__pyx_kp_s_All_dimensions_preceding_dimensi, __pyx_k_All_dimensions_preceding_dimensi, sizeof(__pyx_k_All_dimensions_preceding_dimensi), 0, 0, 1, 0}, + {&__pyx_n_s_AssertionError, __pyx_k_AssertionError, sizeof(__pyx_k_AssertionError), 0, 0, 1, 1}, + {&__pyx_n_s_B, __pyx_k_B, sizeof(__pyx_k_B), 0, 0, 1, 1}, + {&__pyx_kp_s_Buffer_view_does_not_expose_stri, __pyx_k_Buffer_view_does_not_expose_stri, sizeof(__pyx_k_Buffer_view_does_not_expose_stri), 0, 0, 1, 0}, + {&__pyx_kp_s_Can_only_create_a_buffer_that_is, __pyx_k_Can_only_create_a_buffer_that_is, sizeof(__pyx_k_Can_only_create_a_buffer_that_is), 0, 0, 1, 0}, + 
{&__pyx_kp_s_Cannot_assign_to_read_only_memor, __pyx_k_Cannot_assign_to_read_only_memor, sizeof(__pyx_k_Cannot_assign_to_read_only_memor), 0, 0, 1, 0}, + {&__pyx_kp_s_Cannot_create_writable_memory_vi, __pyx_k_Cannot_create_writable_memory_vi, sizeof(__pyx_k_Cannot_create_writable_memory_vi), 0, 0, 1, 0}, + {&__pyx_kp_u_Cannot_index_with_type, __pyx_k_Cannot_index_with_type, sizeof(__pyx_k_Cannot_index_with_type), 0, 1, 0, 0}, + {&__pyx_kp_s_Cannot_transpose_memoryview_with, __pyx_k_Cannot_transpose_memoryview_with, sizeof(__pyx_k_Cannot_transpose_memoryview_with), 0, 0, 1, 0}, + {&__pyx_kp_s_Dimension_d_is_not_direct, __pyx_k_Dimension_d_is_not_direct, sizeof(__pyx_k_Dimension_d_is_not_direct), 0, 0, 1, 0}, + {&__pyx_n_s_Ellipsis, __pyx_k_Ellipsis, sizeof(__pyx_k_Ellipsis), 0, 0, 1, 1}, + {&__pyx_kp_s_Empty_shape_tuple_for_cython_arr, __pyx_k_Empty_shape_tuple_for_cython_arr, sizeof(__pyx_k_Empty_shape_tuple_for_cython_arr), 0, 0, 1, 0}, + {&__pyx_n_s_FLOAT, __pyx_k_FLOAT, sizeof(__pyx_k_FLOAT), 0, 0, 1, 1}, + {&__pyx_n_s_ImportError, __pyx_k_ImportError, sizeof(__pyx_k_ImportError), 0, 0, 1, 1}, + {&__pyx_kp_s_Incompatible_checksums_0x_x_vs_0, __pyx_k_Incompatible_checksums_0x_x_vs_0, sizeof(__pyx_k_Incompatible_checksums_0x_x_vs_0), 0, 0, 1, 0}, + {&__pyx_n_s_IndexError, __pyx_k_IndexError, sizeof(__pyx_k_IndexError), 0, 0, 1, 1}, + {&__pyx_kp_s_Index_out_of_bounds_axis_d, __pyx_k_Index_out_of_bounds_axis_d, sizeof(__pyx_k_Index_out_of_bounds_axis_d), 0, 0, 1, 0}, + {&__pyx_kp_s_Indirect_dimensions_not_supporte, __pyx_k_Indirect_dimensions_not_supporte, sizeof(__pyx_k_Indirect_dimensions_not_supporte), 0, 0, 1, 0}, + {&__pyx_kp_u_Invalid_mode_expected_c_or_fortr, __pyx_k_Invalid_mode_expected_c_or_fortr, sizeof(__pyx_k_Invalid_mode_expected_c_or_fortr), 0, 1, 0, 0}, + {&__pyx_kp_u_Invalid_shape_in_axis, __pyx_k_Invalid_shape_in_axis, sizeof(__pyx_k_Invalid_shape_in_axis), 0, 1, 0, 0}, + {&__pyx_n_s_K1, __pyx_k_K1, sizeof(__pyx_k_K1), 0, 0, 1, 1}, + {&__pyx_n_s_K2, __pyx_k_K2, sizeof(__pyx_k_K2), 0, 0, 1, 1}, + {&__pyx_n_s_MAX_INTERSECT_POINTS, __pyx_k_MAX_INTERSECT_POINTS, sizeof(__pyx_k_MAX_INTERSECT_POINTS), 0, 0, 1, 1}, + {&__pyx_n_s_MemoryError, __pyx_k_MemoryError, sizeof(__pyx_k_MemoryError), 0, 0, 1, 1}, + {&__pyx_kp_s_MemoryView_of_r_at_0x_x, __pyx_k_MemoryView_of_r_at_0x_x, sizeof(__pyx_k_MemoryView_of_r_at_0x_x), 0, 0, 1, 0}, + {&__pyx_kp_s_MemoryView_of_r_object, __pyx_k_MemoryView_of_r_object, sizeof(__pyx_k_MemoryView_of_r_object), 0, 0, 1, 0}, + {&__pyx_n_b_O, __pyx_k_O, sizeof(__pyx_k_O), 0, 0, 0, 1}, + {&__pyx_kp_u_Out_of_bounds_on_buffer_access_a, __pyx_k_Out_of_bounds_on_buffer_access_a, sizeof(__pyx_k_Out_of_bounds_on_buffer_access_a), 0, 1, 0, 0}, + {&__pyx_n_s_PickleError, __pyx_k_PickleError, sizeof(__pyx_k_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_Sequence, __pyx_k_Sequence, sizeof(__pyx_k_Sequence), 0, 0, 1, 1}, + {&__pyx_kp_s_Step_may_not_be_zero_axis_d, __pyx_k_Step_may_not_be_zero_axis_d, sizeof(__pyx_k_Step_may_not_be_zero_axis_d), 0, 0, 1, 0}, + {&__pyx_n_s_TypeError, __pyx_k_TypeError, sizeof(__pyx_k_TypeError), 0, 0, 1, 1}, + {&__pyx_kp_s_Unable_to_convert_item_to_object, __pyx_k_Unable_to_convert_item_to_object, sizeof(__pyx_k_Unable_to_convert_item_to_object), 0, 0, 1, 0}, + {&__pyx_n_s_ValueError, __pyx_k_ValueError, sizeof(__pyx_k_ValueError), 0, 0, 1, 1}, + {&__pyx_n_s_View_MemoryView, __pyx_k_View_MemoryView, sizeof(__pyx_k_View_MemoryView), 0, 0, 1, 1}, + {&__pyx_kp_u__2, __pyx_k__2, sizeof(__pyx_k__2), 0, 1, 0, 0}, + {&__pyx_n_s__3, __pyx_k__3, 
sizeof(__pyx_k__3), 0, 0, 1, 1}, + {&__pyx_n_s__30, __pyx_k__30, sizeof(__pyx_k__30), 0, 0, 1, 1}, + {&__pyx_kp_u__6, __pyx_k__6, sizeof(__pyx_k__6), 0, 1, 0, 0}, + {&__pyx_kp_u__7, __pyx_k__7, sizeof(__pyx_k__7), 0, 1, 0, 0}, + {&__pyx_n_s_abc, __pyx_k_abc, sizeof(__pyx_k_abc), 0, 0, 1, 1}, + {&__pyx_n_s_abs, __pyx_k_abs, sizeof(__pyx_k_abs), 0, 0, 1, 1}, + {&__pyx_n_s_allocate_buffer, __pyx_k_allocate_buffer, sizeof(__pyx_k_allocate_buffer), 0, 0, 1, 1}, + {&__pyx_kp_u_and, __pyx_k_and, sizeof(__pyx_k_and), 0, 1, 0, 0}, + {&__pyx_n_s_approximate, __pyx_k_approximate, sizeof(__pyx_k_approximate), 0, 0, 1, 1}, + {&__pyx_n_s_array, __pyx_k_array, sizeof(__pyx_k_array), 0, 0, 1, 1}, + {&__pyx_n_s_astype, __pyx_k_astype, sizeof(__pyx_k_astype), 0, 0, 1, 1}, + {&__pyx_n_s_asyncio_coroutines, __pyx_k_asyncio_coroutines, sizeof(__pyx_k_asyncio_coroutines), 0, 0, 1, 1}, + {&__pyx_n_s_b, __pyx_k_b, sizeof(__pyx_k_b), 0, 0, 1, 1}, + {&__pyx_n_s_base, __pyx_k_base, sizeof(__pyx_k_base), 0, 0, 1, 1}, + {&__pyx_n_s_box_intersection, __pyx_k_box_intersection, sizeof(__pyx_k_box_intersection), 0, 0, 1, 1}, + {&__pyx_kp_s_box_intersection_pyx, __pyx_k_box_intersection_pyx, sizeof(__pyx_k_box_intersection_pyx), 0, 0, 1, 0}, + {&__pyx_n_s_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 0, 1, 1}, + {&__pyx_n_u_c, __pyx_k_c, sizeof(__pyx_k_c), 0, 1, 0, 1}, + {&__pyx_n_s_cidx, __pyx_k_cidx, sizeof(__pyx_k_cidx), 0, 0, 1, 1}, + {&__pyx_n_s_class, __pyx_k_class, sizeof(__pyx_k_class), 0, 0, 1, 1}, + {&__pyx_n_s_class_getitem, __pyx_k_class_getitem, sizeof(__pyx_k_class_getitem), 0, 0, 1, 1}, + {&__pyx_n_s_clear, __pyx_k_clear, sizeof(__pyx_k_clear), 0, 0, 1, 1}, + {&__pyx_n_s_cline_in_traceback, __pyx_k_cline_in_traceback, sizeof(__pyx_k_cline_in_traceback), 0, 0, 1, 1}, + {&__pyx_n_s_clipPolygon, __pyx_k_clipPolygon, sizeof(__pyx_k_clipPolygon), 0, 0, 1, 1}, + {&__pyx_n_s_clipVertex, __pyx_k_clipVertex, sizeof(__pyx_k_clipVertex), 0, 0, 1, 1}, + {&__pyx_n_s_collections, __pyx_k_collections, sizeof(__pyx_k_collections), 0, 0, 1, 1}, + {&__pyx_kp_s_collections_abc, __pyx_k_collections_abc, sizeof(__pyx_k_collections_abc), 0, 0, 1, 0}, + {&__pyx_n_s_computeIntersection, __pyx_k_computeIntersection, sizeof(__pyx_k_computeIntersection), 0, 0, 1, 1}, + {&__pyx_kp_s_contiguous_and_direct, __pyx_k_contiguous_and_direct, sizeof(__pyx_k_contiguous_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_contiguous_and_indirect, __pyx_k_contiguous_and_indirect, sizeof(__pyx_k_contiguous_and_indirect), 0, 0, 1, 0}, + {&__pyx_n_s_copy, __pyx_k_copy, sizeof(__pyx_k_copy), 0, 0, 1, 1}, + {&__pyx_n_s_count, __pyx_k_count, sizeof(__pyx_k_count), 0, 0, 1, 1}, + {&__pyx_n_s_cp1, __pyx_k_cp1, sizeof(__pyx_k_cp1), 0, 0, 1, 1}, + {&__pyx_n_s_cp2, __pyx_k_cp2, sizeof(__pyx_k_cp2), 0, 0, 1, 1}, + {&__pyx_n_s_dc, __pyx_k_dc, sizeof(__pyx_k_dc), 0, 0, 1, 1}, + {&__pyx_n_s_dict, __pyx_k_dict, sizeof(__pyx_k_dict), 0, 0, 1, 1}, + {&__pyx_kp_u_disable, __pyx_k_disable, sizeof(__pyx_k_disable), 0, 1, 0, 0}, + {&__pyx_n_s_dot, __pyx_k_dot, sizeof(__pyx_k_dot), 0, 0, 1, 1}, + {&__pyx_n_s_dp, __pyx_k_dp, sizeof(__pyx_k_dp), 0, 0, 1, 1}, + {&__pyx_n_s_dtype, __pyx_k_dtype, sizeof(__pyx_k_dtype), 0, 0, 1, 1}, + {&__pyx_n_s_dtype_is_object, __pyx_k_dtype_is_object, sizeof(__pyx_k_dtype_is_object), 0, 0, 1, 1}, + {&__pyx_n_s_e, __pyx_k_e, sizeof(__pyx_k_e), 0, 0, 1, 1}, + {&__pyx_kp_u_enable, __pyx_k_enable, sizeof(__pyx_k_enable), 0, 1, 0, 0}, + {&__pyx_n_s_encode, __pyx_k_encode, sizeof(__pyx_k_encode), 0, 0, 1, 1}, + {&__pyx_n_s_enumerate, __pyx_k_enumerate, 
sizeof(__pyx_k_enumerate), 0, 0, 1, 1}, + {&__pyx_n_s_error, __pyx_k_error, sizeof(__pyx_k_error), 0, 0, 1, 1}, + {&__pyx_n_s_flags, __pyx_k_flags, sizeof(__pyx_k_flags), 0, 0, 1, 1}, + {&__pyx_n_s_float32, __pyx_k_float32, sizeof(__pyx_k_float32), 0, 0, 1, 1}, + {&__pyx_n_s_format, __pyx_k_format, sizeof(__pyx_k_format), 0, 0, 1, 1}, + {&__pyx_n_s_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 0, 1, 1}, + {&__pyx_n_u_fortran, __pyx_k_fortran, sizeof(__pyx_k_fortran), 0, 1, 0, 1}, + {&__pyx_kp_u_gc, __pyx_k_gc, sizeof(__pyx_k_gc), 0, 1, 0, 0}, + {&__pyx_n_s_getstate, __pyx_k_getstate, sizeof(__pyx_k_getstate), 0, 0, 1, 1}, + {&__pyx_kp_u_got, __pyx_k_got, sizeof(__pyx_k_got), 0, 1, 0, 0}, + {&__pyx_kp_u_got_differing_extents_in_dimensi, __pyx_k_got_differing_extents_in_dimensi, sizeof(__pyx_k_got_differing_extents_in_dimensi), 0, 1, 0, 0}, + {&__pyx_n_s_id, __pyx_k_id, sizeof(__pyx_k_id), 0, 0, 1, 1}, + {&__pyx_n_s_iidx, __pyx_k_iidx, sizeof(__pyx_k_iidx), 0, 0, 1, 1}, + {&__pyx_n_s_import, __pyx_k_import, sizeof(__pyx_k_import), 0, 0, 1, 1}, + {&__pyx_n_s_inc, __pyx_k_inc, sizeof(__pyx_k_inc), 0, 0, 1, 1}, + {&__pyx_n_s_index, __pyx_k_index, sizeof(__pyx_k_index), 0, 0, 1, 1}, + {&__pyx_n_s_initializing, __pyx_k_initializing, sizeof(__pyx_k_initializing), 0, 0, 1, 1}, + {&__pyx_n_s_inputList, __pyx_k_inputList, sizeof(__pyx_k_inputList), 0, 0, 1, 1}, + {&__pyx_n_s_inputList_np, __pyx_k_inputList_np, sizeof(__pyx_k_inputList_np), 0, 0, 1, 1}, + {&__pyx_n_s_inter, __pyx_k_inter, sizeof(__pyx_k_inter), 0, 0, 1, 1}, + {&__pyx_n_s_inter_areas, __pyx_k_inter_areas, sizeof(__pyx_k_inter_areas), 0, 0, 1, 1}, + {&__pyx_n_s_is_coroutine, __pyx_k_is_coroutine, sizeof(__pyx_k_is_coroutine), 0, 0, 1, 1}, + {&__pyx_kp_u_isenabled, __pyx_k_isenabled, sizeof(__pyx_k_isenabled), 0, 1, 0, 0}, + {&__pyx_n_s_itemsize, __pyx_k_itemsize, sizeof(__pyx_k_itemsize), 0, 0, 1, 1}, + {&__pyx_kp_s_itemsize_0_for_cython_array, __pyx_k_itemsize_0_for_cython_array, sizeof(__pyx_k_itemsize_0_for_cython_array), 0, 0, 1, 0}, + {&__pyx_n_s_k1, __pyx_k_k1, sizeof(__pyx_k_k1), 0, 0, 1, 1}, + {&__pyx_n_s_k2, __pyx_k_k2, sizeof(__pyx_k_k2), 0, 0, 1, 1}, + {&__pyx_n_s_lenc, __pyx_k_lenc, sizeof(__pyx_k_lenc), 0, 0, 1, 1}, + {&__pyx_n_s_main, __pyx_k_main, sizeof(__pyx_k_main), 0, 0, 1, 1}, + {&__pyx_n_s_memview, __pyx_k_memview, sizeof(__pyx_k_memview), 0, 0, 1, 1}, + {&__pyx_n_s_mode, __pyx_k_mode, sizeof(__pyx_k_mode), 0, 0, 1, 1}, + {&__pyx_n_s_n1, __pyx_k_n1, sizeof(__pyx_k_n1), 0, 0, 1, 1}, + {&__pyx_n_s_n2, __pyx_k_n2, sizeof(__pyx_k_n2), 0, 0, 1, 1}, + {&__pyx_n_s_n3, __pyx_k_n3, sizeof(__pyx_k_n3), 0, 0, 1, 1}, + {&__pyx_n_s_name, __pyx_k_name, sizeof(__pyx_k_name), 0, 0, 1, 1}, + {&__pyx_n_s_name_2, __pyx_k_name_2, sizeof(__pyx_k_name_2), 0, 0, 1, 1}, + {&__pyx_n_s_ndim, __pyx_k_ndim, sizeof(__pyx_k_ndim), 0, 0, 1, 1}, + {&__pyx_n_s_new, __pyx_k_new, sizeof(__pyx_k_new), 0, 0, 1, 1}, + {&__pyx_n_s_ninput_list, __pyx_k_ninput_list, sizeof(__pyx_k_ninput_list), 0, 0, 1, 1}, + {&__pyx_n_s_ninter, __pyx_k_ninter, sizeof(__pyx_k_ninter), 0, 0, 1, 1}, + {&__pyx_kp_s_no_default___reduce___due_to_non, __pyx_k_no_default___reduce___due_to_non, sizeof(__pyx_k_no_default___reduce___due_to_non), 0, 0, 1, 0}, + {&__pyx_n_s_non_rot_inter_areas, __pyx_k_non_rot_inter_areas, sizeof(__pyx_k_non_rot_inter_areas), 0, 0, 1, 1}, + {&__pyx_n_s_noutput_list, __pyx_k_noutput_list, sizeof(__pyx_k_noutput_list), 0, 0, 1, 1}, + {&__pyx_n_s_np, __pyx_k_np, sizeof(__pyx_k_np), 0, 0, 1, 1}, + {&__pyx_n_s_num_clip_points, 
__pyx_k_num_clip_points, sizeof(__pyx_k_num_clip_points), 0, 0, 1, 1}, + {&__pyx_n_s_num_intersect_points, __pyx_k_num_intersect_points, sizeof(__pyx_k_num_intersect_points), 0, 0, 1, 1}, + {&__pyx_n_s_numpy, __pyx_k_numpy, sizeof(__pyx_k_numpy), 0, 0, 1, 1}, + {&__pyx_kp_s_numpy_core_multiarray_failed_to, __pyx_k_numpy_core_multiarray_failed_to, sizeof(__pyx_k_numpy_core_multiarray_failed_to), 0, 0, 1, 0}, + {&__pyx_kp_s_numpy_core_umath_failed_to_impor, __pyx_k_numpy_core_umath_failed_to_impor, sizeof(__pyx_k_numpy_core_umath_failed_to_impor), 0, 0, 1, 0}, + {&__pyx_n_s_nums_k2, __pyx_k_nums_k2, sizeof(__pyx_k_nums_k2), 0, 0, 1, 1}, + {&__pyx_n_s_obj, __pyx_k_obj, sizeof(__pyx_k_obj), 0, 0, 1, 1}, + {&__pyx_n_s_outputList, __pyx_k_outputList, sizeof(__pyx_k_outputList), 0, 0, 1, 1}, + {&__pyx_n_s_outputList_np, __pyx_k_outputList_np, sizeof(__pyx_k_outputList_np), 0, 0, 1, 1}, + {&__pyx_n_s_pack, __pyx_k_pack, sizeof(__pyx_k_pack), 0, 0, 1, 1}, + {&__pyx_n_s_pickle, __pyx_k_pickle, sizeof(__pyx_k_pickle), 0, 0, 1, 1}, + {&__pyx_n_s_polygon_clip_float, __pyx_k_polygon_clip_float, sizeof(__pyx_k_polygon_clip_float), 0, 0, 1, 1}, + {&__pyx_n_s_polygon_clip_unnest, __pyx_k_polygon_clip_unnest, sizeof(__pyx_k_polygon_clip_unnest), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_PickleError, __pyx_k_pyx_PickleError, sizeof(__pyx_k_pyx_PickleError), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_checksum, __pyx_k_pyx_checksum, sizeof(__pyx_k_pyx_checksum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_result, __pyx_k_pyx_result, sizeof(__pyx_k_pyx_result), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_state, __pyx_k_pyx_state, sizeof(__pyx_k_pyx_state), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_type, __pyx_k_pyx_type, sizeof(__pyx_k_pyx_type), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_unpickle_Enum, __pyx_k_pyx_unpickle_Enum, sizeof(__pyx_k_pyx_unpickle_Enum), 0, 0, 1, 1}, + {&__pyx_n_s_pyx_vtable, __pyx_k_pyx_vtable, sizeof(__pyx_k_pyx_vtable), 0, 0, 1, 1}, + {&__pyx_n_s_range, __pyx_k_range, sizeof(__pyx_k_range), 0, 0, 1, 1}, + {&__pyx_n_s_rect1, __pyx_k_rect1, sizeof(__pyx_k_rect1), 0, 0, 1, 1}, + {&__pyx_n_s_rect2, __pyx_k_rect2, sizeof(__pyx_k_rect2), 0, 0, 1, 1}, + {&__pyx_n_s_reduce, __pyx_k_reduce, sizeof(__pyx_k_reduce), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_cython, __pyx_k_reduce_cython, sizeof(__pyx_k_reduce_cython), 0, 0, 1, 1}, + {&__pyx_n_s_reduce_ex, __pyx_k_reduce_ex, sizeof(__pyx_k_reduce_ex), 0, 0, 1, 1}, + {&__pyx_n_s_register, __pyx_k_register, sizeof(__pyx_k_register), 0, 0, 1, 1}, + {&__pyx_n_s_roll, __pyx_k_roll, sizeof(__pyx_k_roll), 0, 0, 1, 1}, + {&__pyx_n_s_s, __pyx_k_s, sizeof(__pyx_k_s), 0, 0, 1, 1}, + {&__pyx_n_s_setstate, __pyx_k_setstate, sizeof(__pyx_k_setstate), 0, 0, 1, 1}, + {&__pyx_n_s_setstate_cython, __pyx_k_setstate_cython, sizeof(__pyx_k_setstate_cython), 0, 0, 1, 1}, + {&__pyx_n_s_shape, __pyx_k_shape, sizeof(__pyx_k_shape), 0, 0, 1, 1}, + {&__pyx_n_s_size, __pyx_k_size, sizeof(__pyx_k_size), 0, 0, 1, 1}, + {&__pyx_n_s_spec, __pyx_k_spec, sizeof(__pyx_k_spec), 0, 0, 1, 1}, + {&__pyx_n_s_start, __pyx_k_start, sizeof(__pyx_k_start), 0, 0, 1, 1}, + {&__pyx_n_s_step, __pyx_k_step, sizeof(__pyx_k_step), 0, 0, 1, 1}, + {&__pyx_n_s_stop, __pyx_k_stop, sizeof(__pyx_k_stop), 0, 0, 1, 1}, + {&__pyx_kp_s_strided_and_direct, __pyx_k_strided_and_direct, sizeof(__pyx_k_strided_and_direct), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_direct_or_indirect, __pyx_k_strided_and_direct_or_indirect, sizeof(__pyx_k_strided_and_direct_or_indirect), 0, 0, 1, 0}, + {&__pyx_kp_s_strided_and_indirect, __pyx_k_strided_and_indirect, sizeof(__pyx_k_strided_and_indirect), 0, 
0, 1, 0}, + {&__pyx_kp_s_stringsource, __pyx_k_stringsource, sizeof(__pyx_k_stringsource), 0, 0, 1, 0}, + {&__pyx_n_s_struct, __pyx_k_struct, sizeof(__pyx_k_struct), 0, 0, 1, 1}, + {&__pyx_n_s_subjectPolygon, __pyx_k_subjectPolygon, sizeof(__pyx_k_subjectPolygon), 0, 0, 1, 1}, + {&__pyx_n_s_subjectVertex, __pyx_k_subjectVertex, sizeof(__pyx_k_subjectVertex), 0, 0, 1, 1}, + {&__pyx_n_s_sys, __pyx_k_sys, sizeof(__pyx_k_sys), 0, 0, 1, 1}, + {&__pyx_n_s_test, __pyx_k_test, sizeof(__pyx_k_test), 0, 0, 1, 1}, + {&__pyx_kp_s_unable_to_allocate_array_data, __pyx_k_unable_to_allocate_array_data, sizeof(__pyx_k_unable_to_allocate_array_data), 0, 0, 1, 0}, + {&__pyx_kp_s_unable_to_allocate_shape_and_str, __pyx_k_unable_to_allocate_shape_and_str, sizeof(__pyx_k_unable_to_allocate_shape_and_str), 0, 0, 1, 0}, + {&__pyx_n_s_unpack, __pyx_k_unpack, sizeof(__pyx_k_unpack), 0, 0, 1, 1}, + {&__pyx_n_s_update, __pyx_k_update, sizeof(__pyx_k_update), 0, 0, 1, 1}, + {&__pyx_n_s_version_info, __pyx_k_version_info, sizeof(__pyx_k_version_info), 0, 0, 1, 1}, + {&__pyx_n_s_x, __pyx_k_x, sizeof(__pyx_k_x), 0, 0, 1, 1}, + {&__pyx_n_s_xs, __pyx_k_xs, sizeof(__pyx_k_xs), 0, 0, 1, 1}, + {&__pyx_n_s_ys, __pyx_k_ys, sizeof(__pyx_k_ys), 0, 0, 1, 1}, + {&__pyx_n_s_zeros, __pyx_k_zeros, sizeof(__pyx_k_zeros), 0, 0, 1, 1}, + {0, 0, 0, 0, 0, 0, 0} + }; + return __Pyx_InitStrings(__pyx_string_tab); +} +/* #### Code section: cached_builtins ### */ +static CYTHON_SMALL_CODE int __Pyx_InitCachedBuiltins(void) { + __pyx_builtin_range = __Pyx_GetBuiltinName(__pyx_n_s_range); if (!__pyx_builtin_range) __PYX_ERR(0, 47, __pyx_L1_error) + __pyx_builtin___import__ = __Pyx_GetBuiltinName(__pyx_n_s_import); if (!__pyx_builtin___import__) __PYX_ERR(1, 100, __pyx_L1_error) + __pyx_builtin_ValueError = __Pyx_GetBuiltinName(__pyx_n_s_ValueError); if (!__pyx_builtin_ValueError) __PYX_ERR(1, 141, __pyx_L1_error) + __pyx_builtin_MemoryError = __Pyx_GetBuiltinName(__pyx_n_s_MemoryError); if (!__pyx_builtin_MemoryError) __PYX_ERR(1, 156, __pyx_L1_error) + __pyx_builtin_enumerate = __Pyx_GetBuiltinName(__pyx_n_s_enumerate); if (!__pyx_builtin_enumerate) __PYX_ERR(1, 159, __pyx_L1_error) + __pyx_builtin_TypeError = __Pyx_GetBuiltinName(__pyx_n_s_TypeError); if (!__pyx_builtin_TypeError) __PYX_ERR(1, 2, __pyx_L1_error) + __pyx_builtin_AssertionError = __Pyx_GetBuiltinName(__pyx_n_s_AssertionError); if (!__pyx_builtin_AssertionError) __PYX_ERR(1, 373, __pyx_L1_error) + __pyx_builtin_Ellipsis = __Pyx_GetBuiltinName(__pyx_n_s_Ellipsis); if (!__pyx_builtin_Ellipsis) __PYX_ERR(1, 408, __pyx_L1_error) + __pyx_builtin_id = __Pyx_GetBuiltinName(__pyx_n_s_id); if (!__pyx_builtin_id) __PYX_ERR(1, 618, __pyx_L1_error) + __pyx_builtin_IndexError = __Pyx_GetBuiltinName(__pyx_n_s_IndexError); if (!__pyx_builtin_IndexError) __PYX_ERR(1, 914, __pyx_L1_error) + __pyx_builtin_ImportError = __Pyx_GetBuiltinName(__pyx_n_s_ImportError); if (!__pyx_builtin_ImportError) __PYX_ERR(2, 986, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} +/* #### Code section: cached_constants ### */ + +static CYTHON_SMALL_CODE int __Pyx_InitCachedConstants(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_InitCachedConstants", 0); + + /* "View.MemoryView":582 + * def suboffsets(self): + * if self.view.suboffsets == NULL: + * return (-1,) * self.view.ndim # <<<<<<<<<<<<<< + * + * return tuple([suboffset for suboffset in self.view.suboffsets[:self.view.ndim]]) + */ + __pyx_tuple__4 = PyTuple_New(1); if (unlikely(!__pyx_tuple__4)) __PYX_ERR(1, 582, 
__pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__4); + __Pyx_INCREF(__pyx_int_neg_1); + __Pyx_GIVEREF(__pyx_int_neg_1); + if (__Pyx_PyTuple_SET_ITEM(__pyx_tuple__4, 0, __pyx_int_neg_1)) __PYX_ERR(1, 582, __pyx_L1_error); + __Pyx_GIVEREF(__pyx_tuple__4); + + /* "View.MemoryView":679 + * tup = index if isinstance(index, tuple) else (index,) + * + * result = [slice(None)] * ndim # <<<<<<<<<<<<<< + * have_slices = False + * seen_ellipsis = False + */ + __pyx_slice__5 = PySlice_New(Py_None, Py_None, Py_None); if (unlikely(!__pyx_slice__5)) __PYX_ERR(1, 679, __pyx_L1_error) + __Pyx_GOTREF(__pyx_slice__5); + __Pyx_GIVEREF(__pyx_slice__5); + + /* "(tree fragment)":4 + * cdef object __pyx_PickleError + * cdef object __pyx_result + * if __pyx_checksum not in (0x82a3537, 0x6ae9995, 0xb068931): # <<<<<<<<<<<<<< + * from pickle import PickleError as __pyx_PickleError + * raise __pyx_PickleError, "Incompatible checksums (0x%x vs (0x82a3537, 0x6ae9995, 0xb068931) = (name))" % __pyx_checksum + */ + __pyx_tuple__8 = PyTuple_Pack(3, __pyx_int_136983863, __pyx_int_112105877, __pyx_int_184977713); if (unlikely(!__pyx_tuple__8)) __PYX_ERR(1, 4, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__8); + __Pyx_GIVEREF(__pyx_tuple__8); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":986 + * __pyx_import_array() + * except Exception: + * raise ImportError("numpy.core.multiarray failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_umath() except -1: + */ + __pyx_tuple__9 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_multiarray_failed_to); if (unlikely(!__pyx_tuple__9)) __PYX_ERR(2, 986, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__9); + __Pyx_GIVEREF(__pyx_tuple__9); + + /* "../../../../../opt/conda/envs/csj-3dv/lib/python3.8/site-packages/numpy/__init__.cython-30.pxd":992 + * _import_umath() + * except Exception: + * raise ImportError("numpy.core.umath failed to import") # <<<<<<<<<<<<<< + * + * cdef inline int import_ufunc() except -1: + */ + __pyx_tuple__10 = PyTuple_Pack(1, __pyx_kp_s_numpy_core_umath_failed_to_impor); if (unlikely(!__pyx_tuple__10)) __PYX_ERR(2, 992, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__10); + __Pyx_GIVEREF(__pyx_tuple__10); + + /* "View.MemoryView":100 + * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" + * try: + * if __import__("sys").version_info >= (3, 3): # <<<<<<<<<<<<<< + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + * else: + */ + __pyx_tuple__11 = PyTuple_Pack(1, __pyx_n_s_sys); if (unlikely(!__pyx_tuple__11)) __PYX_ERR(1, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__11); + __Pyx_GIVEREF(__pyx_tuple__11); + __pyx_tuple__12 = PyTuple_Pack(2, __pyx_int_3, __pyx_int_3); if (unlikely(!__pyx_tuple__12)) __PYX_ERR(1, 100, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__12); + __Pyx_GIVEREF(__pyx_tuple__12); + + /* "View.MemoryView":101 + * try: + * if __import__("sys").version_info >= (3, 3): + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence # <<<<<<<<<<<<<< + * else: + * __pyx_collections_abc_Sequence = __import__("collections").Sequence + */ + __pyx_tuple__13 = PyTuple_Pack(1, __pyx_kp_s_collections_abc); if (unlikely(!__pyx_tuple__13)) __PYX_ERR(1, 101, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__13); + __Pyx_GIVEREF(__pyx_tuple__13); + + /* "View.MemoryView":103 + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + * else: + * __pyx_collections_abc_Sequence = __import__("collections").Sequence # <<<<<<<<<<<<<< + 
* except: + * + */ + __pyx_tuple__14 = PyTuple_Pack(1, __pyx_n_s_collections); if (unlikely(!__pyx_tuple__14)) __PYX_ERR(1, 103, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__14); + __Pyx_GIVEREF(__pyx_tuple__14); + + /* "View.MemoryView":309 + * return self.name + * + * cdef generic = Enum("") # <<<<<<<<<<<<<< + * cdef strided = Enum("") # default + * cdef indirect = Enum("") + */ + __pyx_tuple__15 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct_or_indirect); if (unlikely(!__pyx_tuple__15)) __PYX_ERR(1, 309, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__15); + __Pyx_GIVEREF(__pyx_tuple__15); + + /* "View.MemoryView":310 + * + * cdef generic = Enum("") + * cdef strided = Enum("") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("") + * + */ + __pyx_tuple__16 = PyTuple_Pack(1, __pyx_kp_s_strided_and_direct); if (unlikely(!__pyx_tuple__16)) __PYX_ERR(1, 310, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__16); + __Pyx_GIVEREF(__pyx_tuple__16); + + /* "View.MemoryView":311 + * cdef generic = Enum("") + * cdef strided = Enum("") # default + * cdef indirect = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__17 = PyTuple_Pack(1, __pyx_kp_s_strided_and_indirect); if (unlikely(!__pyx_tuple__17)) __PYX_ERR(1, 311, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__17); + __Pyx_GIVEREF(__pyx_tuple__17); + + /* "View.MemoryView":314 + * + * + * cdef contiguous = Enum("") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("") + * + */ + __pyx_tuple__18 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_direct); if (unlikely(!__pyx_tuple__18)) __PYX_ERR(1, 314, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__18); + __Pyx_GIVEREF(__pyx_tuple__18); + + /* "View.MemoryView":315 + * + * cdef contiguous = Enum("") + * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_tuple__19 = PyTuple_Pack(1, __pyx_kp_s_contiguous_and_indirect); if (unlikely(!__pyx_tuple__19)) __PYX_ERR(1, 315, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__19); + __Pyx_GIVEREF(__pyx_tuple__19); + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_tuple__20 = PyTuple_Pack(5, __pyx_n_s_pyx_type, __pyx_n_s_pyx_checksum, __pyx_n_s_pyx_state, __pyx_n_s_pyx_PickleError, __pyx_n_s_pyx_result); if (unlikely(!__pyx_tuple__20)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__20); + __Pyx_GIVEREF(__pyx_tuple__20); + __pyx_codeobj__21 = (PyObject*)__Pyx_PyCode_New(3, 0, 0, 5, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__20, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_stringsource, __pyx_n_s_pyx_unpickle_Enum, 1, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__21)) __PYX_ERR(1, 1, __pyx_L1_error) + + /* "box_intersection.pyx":11 + * FLOAT = np.float32 + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def computeIntersection(cp1, cp2, s, e): + */ + __pyx_tuple__22 = PyTuple_Pack(9, __pyx_n_s_cp1, __pyx_n_s_cp2, __pyx_n_s_s, __pyx_n_s_e, __pyx_n_s_dc, __pyx_n_s_dp, __pyx_n_s_n1, __pyx_n_s_n2, __pyx_n_s_n3); if (unlikely(!__pyx_tuple__22)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__22); + __Pyx_GIVEREF(__pyx_tuple__22); + __pyx_codeobj__23 = (PyObject*)__Pyx_PyCode_New(4, 0, 0, 9, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__22, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_box_intersection_pyx, __pyx_n_s_computeIntersection, 
11, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__23)) __PYX_ERR(0, 11, __pyx_L1_error) + + /* "box_intersection.pyx":26 + * return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0]) + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * def polygon_clip_unnest(float [:, :] subjectPolygon, float [:, :] clipPolygon): + * """ Clip a polygon with another polygon. + */ + __pyx_tuple__24 = PyTuple_Pack(15, __pyx_n_s_subjectPolygon, __pyx_n_s_clipPolygon, __pyx_n_s_outputList, __pyx_n_s_cp1, __pyx_n_s_lenc, __pyx_n_s_iidx, __pyx_n_s_cidx, __pyx_n_s_clipVertex, __pyx_n_s_cp2, __pyx_n_s_inputList, __pyx_n_s_s, __pyx_n_s_inc, __pyx_n_s_subjectVertex, __pyx_n_s_e, __pyx_n_s_x); if (unlikely(!__pyx_tuple__24)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__24); + __Pyx_GIVEREF(__pyx_tuple__24); + __pyx_codeobj__25 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 15, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__24, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_box_intersection_pyx, __pyx_n_s_polygon_clip_unnest, 26, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__25)) __PYX_ERR(0, 26, __pyx_L1_error) + + /* "box_intersection.pyx":115 + * return num_points + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def polygon_clip_float(float [:, :] subjectPolygon, float [:, :] clipPolygon): + */ + __pyx_tuple__26 = PyTuple_Pack(18, __pyx_n_s_subjectPolygon, __pyx_n_s_clipPolygon, __pyx_n_s_num_clip_points, __pyx_n_s_cp1, __pyx_n_s_MAX_INTERSECT_POINTS, __pyx_n_s_num_intersect_points, __pyx_n_s_outputList_np, __pyx_n_s_outputList, __pyx_n_s_inputList_np, __pyx_n_s_inputList, __pyx_n_s_noutput_list, __pyx_n_s_ninput_list, __pyx_n_s_iidx, __pyx_n_s_cidx, __pyx_n_s_clipVertex, __pyx_n_s_cp2, __pyx_n_s_s, __pyx_n_s_e); if (unlikely(!__pyx_tuple__26)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__26); + __Pyx_GIVEREF(__pyx_tuple__26); + __pyx_codeobj__27 = (PyObject*)__Pyx_PyCode_New(2, 0, 0, 18, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__26, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_box_intersection_pyx, __pyx_n_s_polygon_clip_float, 115, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__27)) __PYX_ERR(0, 115, __pyx_L1_error) + + /* "box_intersection.pyx":164 + * + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def box_intersection(float [:, :, :, :] rect1, + */ + __pyx_tuple__28 = PyTuple_Pack(18, __pyx_n_s_rect1, __pyx_n_s_rect2, __pyx_n_s_non_rot_inter_areas, __pyx_n_s_nums_k2, __pyx_n_s_inter_areas, __pyx_n_s_approximate, __pyx_n_s_B, __pyx_n_s_K1, __pyx_n_s_K2, __pyx_n_s_b, __pyx_n_s_k1, __pyx_n_s_k2, __pyx_n_s_inter, __pyx_n_s_ninter, __pyx_n_s_xs, __pyx_n_s_ys, __pyx_n_s_x, __pyx_n_s_x); if (unlikely(!__pyx_tuple__28)) __PYX_ERR(0, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_tuple__28); + __Pyx_GIVEREF(__pyx_tuple__28); + __pyx_codeobj__29 = (PyObject*)__Pyx_PyCode_New(6, 0, 0, 18, 0, CO_OPTIMIZED|CO_NEWLOCALS, __pyx_empty_bytes, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_tuple__28, __pyx_empty_tuple, __pyx_empty_tuple, __pyx_kp_s_box_intersection_pyx, __pyx_n_s_box_intersection, 164, __pyx_empty_bytes); if (unlikely(!__pyx_codeobj__29)) __PYX_ERR(0, 164, __pyx_L1_error) + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_RefNannyFinishContext(); + return -1; +} +/* #### Code section: init_constants ### */ + +static CYTHON_SMALL_CODE int __Pyx_InitConstants(void) { 
+ __pyx_umethod_PyList_Type_clear.type = (PyObject*)&PyList_Type; + __pyx_umethod_PyList_Type_clear.method_name = &__pyx_n_s_clear; + __pyx_umethod_PyList_Type_copy.type = (PyObject*)&PyList_Type; + __pyx_umethod_PyList_Type_copy.method_name = &__pyx_n_s_copy; + if (__Pyx_CreateStringTabAndInitStrings() < 0) __PYX_ERR(0, 1, __pyx_L1_error); + __pyx_float_0_5 = PyFloat_FromDouble(0.5); if (unlikely(!__pyx_float_0_5)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_float_1_0 = PyFloat_FromDouble(1.0); if (unlikely(!__pyx_float_1_0)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_0 = PyInt_FromLong(0); if (unlikely(!__pyx_int_0)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_1 = PyInt_FromLong(1); if (unlikely(!__pyx_int_1)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_2 = PyInt_FromLong(2); if (unlikely(!__pyx_int_2)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_3 = PyInt_FromLong(3); if (unlikely(!__pyx_int_3)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_112105877 = PyInt_FromLong(112105877L); if (unlikely(!__pyx_int_112105877)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_136983863 = PyInt_FromLong(136983863L); if (unlikely(!__pyx_int_136983863)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_184977713 = PyInt_FromLong(184977713L); if (unlikely(!__pyx_int_184977713)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_int_neg_1 = PyInt_FromLong(-1); if (unlikely(!__pyx_int_neg_1)) __PYX_ERR(0, 1, __pyx_L1_error) + return 0; + __pyx_L1_error:; + return -1; +} +/* #### Code section: init_globals ### */ + +static CYTHON_SMALL_CODE int __Pyx_InitGlobals(void) { + /* AssertionsEnabled.init */ + if (likely(__Pyx_init_assertions_enabled() == 0)); else + +if (unlikely(PyErr_Occurred())) __PYX_ERR(0, 1, __pyx_L1_error) + + return 0; + __pyx_L1_error:; + return -1; +} +/* #### Code section: init_module ### */ + +static CYTHON_SMALL_CODE int __Pyx_modinit_global_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_export_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_init_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_type_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_variable_import_code(void); /*proto*/ +static CYTHON_SMALL_CODE int __Pyx_modinit_function_import_code(void); /*proto*/ + +static int __Pyx_modinit_global_init_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_global_init_code", 0); + /*--- Global init code ---*/ + __pyx_collections_abc_Sequence = Py_None; Py_INCREF(Py_None); + generic = Py_None; Py_INCREF(Py_None); + strided = Py_None; Py_INCREF(Py_None); + indirect = Py_None; Py_INCREF(Py_None); + contiguous = Py_None; Py_INCREF(Py_None); + indirect_contiguous = Py_None; Py_INCREF(Py_None); + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_variable_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_export_code", 0); + /*--- Variable export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_export_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_export_code", 0); + /*--- Function export code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_type_init_code(void) { + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + 
__Pyx_RefNannySetupContext("__Pyx_modinit_type_init_code", 0); + /*--- Type init code ---*/ + __pyx_vtabptr_array = &__pyx_vtable_array; + __pyx_vtable_array.get_memview = (PyObject *(*)(struct __pyx_array_obj *))__pyx_array_get_memview; + #if CYTHON_USE_TYPE_SPECS + __pyx_array_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_array_spec, NULL); if (unlikely(!__pyx_array_type)) __PYX_ERR(1, 114, __pyx_L1_error) + #if !CYTHON_COMPILING_IN_LIMITED_API + __pyx_array_type->tp_as_buffer = &__pyx_tp_as_buffer_array; + if (!__pyx_array_type->tp_as_buffer->bf_releasebuffer && __pyx_array_type->tp_base->tp_as_buffer && __pyx_array_type->tp_base->tp_as_buffer->bf_releasebuffer) { + __pyx_array_type->tp_as_buffer->bf_releasebuffer = __pyx_array_type->tp_base->tp_as_buffer->bf_releasebuffer; + } + #elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer) + /* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */ + #elif defined(_MSC_VER) + #pragma message ("The buffer protocol is not supported in the Limited C-API < 3.11.") + #else + #warning "The buffer protocol is not supported in the Limited C-API < 3.11." + #endif + if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_array_spec, __pyx_array_type) < 0) __PYX_ERR(1, 114, __pyx_L1_error) + #else + __pyx_array_type = &__pyx_type___pyx_array; + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + #endif + #if !CYTHON_USE_TYPE_SPECS + if (__Pyx_PyType_Ready(__pyx_array_type) < 0) __PYX_ERR(1, 114, __pyx_L1_error) + #endif + #if PY_MAJOR_VERSION < 3 + __pyx_array_type->tp_print = 0; + #endif + if (__Pyx_SetVtable(__pyx_array_type, __pyx_vtabptr_array) < 0) __PYX_ERR(1, 114, __pyx_L1_error) + #if !CYTHON_COMPILING_IN_LIMITED_API + if (__Pyx_MergeVtables(__pyx_array_type) < 0) __PYX_ERR(1, 114, __pyx_L1_error) + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + if (__Pyx_setup_reduce((PyObject *) __pyx_array_type) < 0) __PYX_ERR(1, 114, __pyx_L1_error) + #endif + #if CYTHON_USE_TYPE_SPECS + __pyx_MemviewEnum_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_MemviewEnum_spec, NULL); if (unlikely(!__pyx_MemviewEnum_type)) __PYX_ERR(1, 302, __pyx_L1_error) + if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_MemviewEnum_spec, __pyx_MemviewEnum_type) < 0) __PYX_ERR(1, 302, __pyx_L1_error) + #else + __pyx_MemviewEnum_type = &__pyx_type___pyx_MemviewEnum; + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + #endif + #if !CYTHON_USE_TYPE_SPECS + if (__Pyx_PyType_Ready(__pyx_MemviewEnum_type) < 0) __PYX_ERR(1, 302, __pyx_L1_error) + #endif + #if PY_MAJOR_VERSION < 3 + __pyx_MemviewEnum_type->tp_print = 0; + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_MemviewEnum_type->tp_dictoffset && __pyx_MemviewEnum_type->tp_getattro == PyObject_GenericGetAttr)) { + __pyx_MemviewEnum_type->tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + if (__Pyx_setup_reduce((PyObject *) __pyx_MemviewEnum_type) < 0) __PYX_ERR(1, 302, __pyx_L1_error) + #endif + __pyx_vtabptr_memoryview = &__pyx_vtable_memoryview; + __pyx_vtable_memoryview.get_item_pointer = (char *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_get_item_pointer; + __pyx_vtable_memoryview.is_slice = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_is_slice; + __pyx_vtable_memoryview.setitem_slice_assignment = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject 
*))__pyx_memoryview_setitem_slice_assignment; + __pyx_vtable_memoryview.setitem_slice_assign_scalar = (PyObject *(*)(struct __pyx_memoryview_obj *, struct __pyx_memoryview_obj *, PyObject *))__pyx_memoryview_setitem_slice_assign_scalar; + __pyx_vtable_memoryview.setitem_indexed = (PyObject *(*)(struct __pyx_memoryview_obj *, PyObject *, PyObject *))__pyx_memoryview_setitem_indexed; + __pyx_vtable_memoryview.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryview_convert_item_to_object; + __pyx_vtable_memoryview.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryview_assign_item_from_object; + __pyx_vtable_memoryview._get_base = (PyObject *(*)(struct __pyx_memoryview_obj *))__pyx_memoryview__get_base; + #if CYTHON_USE_TYPE_SPECS + __pyx_memoryview_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_memoryview_spec, NULL); if (unlikely(!__pyx_memoryview_type)) __PYX_ERR(1, 337, __pyx_L1_error) + #if !CYTHON_COMPILING_IN_LIMITED_API + __pyx_memoryview_type->tp_as_buffer = &__pyx_tp_as_buffer_memoryview; + if (!__pyx_memoryview_type->tp_as_buffer->bf_releasebuffer && __pyx_memoryview_type->tp_base->tp_as_buffer && __pyx_memoryview_type->tp_base->tp_as_buffer->bf_releasebuffer) { + __pyx_memoryview_type->tp_as_buffer->bf_releasebuffer = __pyx_memoryview_type->tp_base->tp_as_buffer->bf_releasebuffer; + } + #elif defined(Py_bf_getbuffer) && defined(Py_bf_releasebuffer) + /* PY_VERSION_HEX >= 0x03090000 || Py_LIMITED_API >= 0x030B0000 */ + #elif defined(_MSC_VER) + #pragma message ("The buffer protocol is not supported in the Limited C-API < 3.11.") + #else + #warning "The buffer protocol is not supported in the Limited C-API < 3.11." + #endif + if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_memoryview_spec, __pyx_memoryview_type) < 0) __PYX_ERR(1, 337, __pyx_L1_error) + #else + __pyx_memoryview_type = &__pyx_type___pyx_memoryview; + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + #endif + #if !CYTHON_USE_TYPE_SPECS + if (__Pyx_PyType_Ready(__pyx_memoryview_type) < 0) __PYX_ERR(1, 337, __pyx_L1_error) + #endif + #if PY_MAJOR_VERSION < 3 + __pyx_memoryview_type->tp_print = 0; + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_memoryview_type->tp_dictoffset && __pyx_memoryview_type->tp_getattro == PyObject_GenericGetAttr)) { + __pyx_memoryview_type->tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + #endif + if (__Pyx_SetVtable(__pyx_memoryview_type, __pyx_vtabptr_memoryview) < 0) __PYX_ERR(1, 337, __pyx_L1_error) + #if !CYTHON_COMPILING_IN_LIMITED_API + if (__Pyx_MergeVtables(__pyx_memoryview_type) < 0) __PYX_ERR(1, 337, __pyx_L1_error) + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + if (__Pyx_setup_reduce((PyObject *) __pyx_memoryview_type) < 0) __PYX_ERR(1, 337, __pyx_L1_error) + #endif + __pyx_vtabptr__memoryviewslice = &__pyx_vtable__memoryviewslice; + __pyx_vtable__memoryviewslice.__pyx_base = *__pyx_vtabptr_memoryview; + __pyx_vtable__memoryviewslice.__pyx_base.convert_item_to_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *))__pyx_memoryviewslice_convert_item_to_object; + __pyx_vtable__memoryviewslice.__pyx_base.assign_item_from_object = (PyObject *(*)(struct __pyx_memoryview_obj *, char *, PyObject *))__pyx_memoryviewslice_assign_item_from_object; + __pyx_vtable__memoryviewslice.__pyx_base._get_base = (PyObject *(*)(struct __pyx_memoryview_obj 
*))__pyx_memoryviewslice__get_base; + #if CYTHON_USE_TYPE_SPECS + __pyx_t_1 = PyTuple_Pack(1, (PyObject *)__pyx_memoryview_type); if (unlikely(!__pyx_t_1)) __PYX_ERR(1, 952, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_memoryviewslice_type = (PyTypeObject *) __Pyx_PyType_FromModuleAndSpec(__pyx_m, &__pyx_type___pyx_memoryviewslice_spec, __pyx_t_1); + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + if (unlikely(!__pyx_memoryviewslice_type)) __PYX_ERR(1, 952, __pyx_L1_error) + if (__Pyx_fix_up_extension_type_from_spec(&__pyx_type___pyx_memoryviewslice_spec, __pyx_memoryviewslice_type) < 0) __PYX_ERR(1, 952, __pyx_L1_error) + #else + __pyx_memoryviewslice_type = &__pyx_type___pyx_memoryviewslice; + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + __pyx_memoryviewslice_type->tp_base = __pyx_memoryview_type; + #endif + #if !CYTHON_USE_TYPE_SPECS + if (__Pyx_PyType_Ready(__pyx_memoryviewslice_type) < 0) __PYX_ERR(1, 952, __pyx_L1_error) + #endif + #if PY_MAJOR_VERSION < 3 + __pyx_memoryviewslice_type->tp_print = 0; + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + if ((CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP) && likely(!__pyx_memoryviewslice_type->tp_dictoffset && __pyx_memoryviewslice_type->tp_getattro == PyObject_GenericGetAttr)) { + __pyx_memoryviewslice_type->tp_getattro = __Pyx_PyObject_GenericGetAttr; + } + #endif + if (__Pyx_SetVtable(__pyx_memoryviewslice_type, __pyx_vtabptr__memoryviewslice) < 0) __PYX_ERR(1, 952, __pyx_L1_error) + #if !CYTHON_COMPILING_IN_LIMITED_API + if (__Pyx_MergeVtables(__pyx_memoryviewslice_type) < 0) __PYX_ERR(1, 952, __pyx_L1_error) + #endif + #if !CYTHON_COMPILING_IN_LIMITED_API + if (__Pyx_setup_reduce((PyObject *) __pyx_memoryviewslice_type) < 0) __PYX_ERR(1, 952, __pyx_L1_error) + #endif + __Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_type_import_code(void) { + __Pyx_RefNannyDeclarations + PyObject *__pyx_t_1 = NULL; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannySetupContext("__Pyx_modinit_type_import_code", 0); + /*--- Type import code ---*/ + __pyx_t_1 = PyImport_ImportModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_t_1)) __PYX_ERR(3, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_7cpython_4type_type = __Pyx_ImportType_3_0_2(__pyx_t_1, __Pyx_BUILTIN_MODULE_NAME, "type", + #if defined(PYPY_VERSION_NUM) && PYPY_VERSION_NUM < 0x050B0000 + sizeof(PyTypeObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyTypeObject), + #elif CYTHON_COMPILING_IN_LIMITED_API + sizeof(PyTypeObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyTypeObject), + #else + sizeof(PyHeapTypeObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyHeapTypeObject), + #endif + __Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_7cpython_4type_type) __PYX_ERR(3, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + __pyx_t_1 = PyImport_ImportModule("numpy"); if (unlikely(!__pyx_t_1)) __PYX_ERR(2, 202, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_1); + __pyx_ptype_5numpy_dtype = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "dtype", sizeof(PyArray_Descr), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyArray_Descr),__Pyx_ImportType_CheckSize_Ignore_3_0_2); if (!__pyx_ptype_5numpy_dtype) __PYX_ERR(2, 202, __pyx_L1_error) + __pyx_ptype_5numpy_flatiter = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "flatiter", sizeof(PyArrayIterObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyArrayIterObject),__Pyx_ImportType_CheckSize_Ignore_3_0_2); if 
(!__pyx_ptype_5numpy_flatiter) __PYX_ERR(2, 225, __pyx_L1_error) + __pyx_ptype_5numpy_broadcast = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "broadcast", sizeof(PyArrayMultiIterObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyArrayMultiIterObject),__Pyx_ImportType_CheckSize_Ignore_3_0_2); if (!__pyx_ptype_5numpy_broadcast) __PYX_ERR(2, 229, __pyx_L1_error) + __pyx_ptype_5numpy_ndarray = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "ndarray", sizeof(PyArrayObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyArrayObject),__Pyx_ImportType_CheckSize_Ignore_3_0_2); if (!__pyx_ptype_5numpy_ndarray) __PYX_ERR(2, 238, __pyx_L1_error) + __pyx_ptype_5numpy_generic = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "generic", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_generic) __PYX_ERR(2, 812, __pyx_L1_error) + __pyx_ptype_5numpy_number = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "number", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_number) __PYX_ERR(2, 814, __pyx_L1_error) + __pyx_ptype_5numpy_integer = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "integer", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_integer) __PYX_ERR(2, 816, __pyx_L1_error) + __pyx_ptype_5numpy_signedinteger = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "signedinteger", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_signedinteger) __PYX_ERR(2, 818, __pyx_L1_error) + __pyx_ptype_5numpy_unsignedinteger = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "unsignedinteger", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_unsignedinteger) __PYX_ERR(2, 820, __pyx_L1_error) + __pyx_ptype_5numpy_inexact = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "inexact", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_inexact) __PYX_ERR(2, 822, __pyx_L1_error) + __pyx_ptype_5numpy_floating = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "floating", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_floating) __PYX_ERR(2, 824, __pyx_L1_error) + __pyx_ptype_5numpy_complexfloating = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "complexfloating", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_complexfloating) __PYX_ERR(2, 826, __pyx_L1_error) + __pyx_ptype_5numpy_flexible = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "flexible", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_flexible) __PYX_ERR(2, 828, __pyx_L1_error) + __pyx_ptype_5numpy_character = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "character", sizeof(PyObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyObject),__Pyx_ImportType_CheckSize_Warn_3_0_2); if (!__pyx_ptype_5numpy_character) __PYX_ERR(2, 830, __pyx_L1_error) + __pyx_ptype_5numpy_ufunc = __Pyx_ImportType_3_0_2(__pyx_t_1, "numpy", "ufunc", sizeof(PyUFuncObject), __PYX_GET_STRUCT_ALIGNMENT_3_0_2(PyUFuncObject),__Pyx_ImportType_CheckSize_Ignore_3_0_2); if (!__pyx_ptype_5numpy_ufunc) __PYX_ERR(2, 868, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_1); __pyx_t_1 = 0; + 
__Pyx_RefNannyFinishContext(); + return 0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_1); + __Pyx_RefNannyFinishContext(); + return -1; +} + +static int __Pyx_modinit_variable_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_variable_import_code", 0); + /*--- Variable import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + +static int __Pyx_modinit_function_import_code(void) { + __Pyx_RefNannyDeclarations + __Pyx_RefNannySetupContext("__Pyx_modinit_function_import_code", 0); + /*--- Function import code ---*/ + __Pyx_RefNannyFinishContext(); + return 0; +} + + +#if PY_MAJOR_VERSION >= 3 +#if CYTHON_PEP489_MULTI_PHASE_INIT +static PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def); /*proto*/ +static int __pyx_pymod_exec_box_intersection(PyObject* module); /*proto*/ +static PyModuleDef_Slot __pyx_moduledef_slots[] = { + {Py_mod_create, (void*)__pyx_pymod_create}, + {Py_mod_exec, (void*)__pyx_pymod_exec_box_intersection}, + {0, NULL} +}; +#endif + +#ifdef __cplusplus +namespace { + struct PyModuleDef __pyx_moduledef = + #else + static struct PyModuleDef __pyx_moduledef = + #endif + { + PyModuleDef_HEAD_INIT, + "box_intersection", + 0, /* m_doc */ + #if CYTHON_PEP489_MULTI_PHASE_INIT + 0, /* m_size */ + #elif CYTHON_USE_MODULE_STATE + sizeof(__pyx_mstate), /* m_size */ + #else + -1, /* m_size */ + #endif + __pyx_methods /* m_methods */, + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_moduledef_slots, /* m_slots */ + #else + NULL, /* m_reload */ + #endif + #if CYTHON_USE_MODULE_STATE + __pyx_m_traverse, /* m_traverse */ + __pyx_m_clear, /* m_clear */ + NULL /* m_free */ + #else + NULL, /* m_traverse */ + NULL, /* m_clear */ + NULL /* m_free */ + #endif + }; + #ifdef __cplusplus +} /* anonymous namespace */ +#endif +#endif + +#ifndef CYTHON_NO_PYINIT_EXPORT +#define __Pyx_PyMODINIT_FUNC PyMODINIT_FUNC +#elif PY_MAJOR_VERSION < 3 +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" void +#else +#define __Pyx_PyMODINIT_FUNC void +#endif +#else +#ifdef __cplusplus +#define __Pyx_PyMODINIT_FUNC extern "C" PyObject * +#else +#define __Pyx_PyMODINIT_FUNC PyObject * +#endif +#endif + + +#if PY_MAJOR_VERSION < 3 +__Pyx_PyMODINIT_FUNC initbox_intersection(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC initbox_intersection(void) +#else +__Pyx_PyMODINIT_FUNC PyInit_box_intersection(void) CYTHON_SMALL_CODE; /*proto*/ +__Pyx_PyMODINIT_FUNC PyInit_box_intersection(void) +#if CYTHON_PEP489_MULTI_PHASE_INIT +{ + return PyModuleDef_Init(&__pyx_moduledef); +} +static CYTHON_SMALL_CODE int __Pyx_check_single_interpreter(void) { + #if PY_VERSION_HEX >= 0x030700A1 + static PY_INT64_T main_interpreter_id = -1; + PY_INT64_T current_id = PyInterpreterState_GetID(PyThreadState_Get()->interp); + if (main_interpreter_id == -1) { + main_interpreter_id = current_id; + return (unlikely(current_id == -1)) ? 
-1 : 0; + } else if (unlikely(main_interpreter_id != current_id)) + #else + static PyInterpreterState *main_interpreter = NULL; + PyInterpreterState *current_interpreter = PyThreadState_Get()->interp; + if (!main_interpreter) { + main_interpreter = current_interpreter; + } else if (unlikely(main_interpreter != current_interpreter)) + #endif + { + PyErr_SetString( + PyExc_ImportError, + "Interpreter change detected - this module can only be loaded into one interpreter per process."); + return -1; + } + return 0; +} +#if CYTHON_COMPILING_IN_LIMITED_API +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *module, const char* from_name, const char* to_name, int allow_none) +#else +static CYTHON_SMALL_CODE int __Pyx_copy_spec_to_module(PyObject *spec, PyObject *moddict, const char* from_name, const char* to_name, int allow_none) +#endif +{ + PyObject *value = PyObject_GetAttrString(spec, from_name); + int result = 0; + if (likely(value)) { + if (allow_none || value != Py_None) { +#if CYTHON_COMPILING_IN_LIMITED_API + result = PyModule_AddObject(module, to_name, value); +#else + result = PyDict_SetItemString(moddict, to_name, value); +#endif + } + Py_DECREF(value); + } else if (PyErr_ExceptionMatches(PyExc_AttributeError)) { + PyErr_Clear(); + } else { + result = -1; + } + return result; +} +static CYTHON_SMALL_CODE PyObject* __pyx_pymod_create(PyObject *spec, PyModuleDef *def) { + PyObject *module = NULL, *moddict, *modname; + CYTHON_UNUSED_VAR(def); + if (__Pyx_check_single_interpreter()) + return NULL; + if (__pyx_m) + return __Pyx_NewRef(__pyx_m); + modname = PyObject_GetAttrString(spec, "name"); + if (unlikely(!modname)) goto bad; + module = PyModule_NewObject(modname); + Py_DECREF(modname); + if (unlikely(!module)) goto bad; +#if CYTHON_COMPILING_IN_LIMITED_API + moddict = module; +#else + moddict = PyModule_GetDict(module); + if (unlikely(!moddict)) goto bad; +#endif + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "loader", "__loader__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "origin", "__file__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "parent", "__package__", 1) < 0)) goto bad; + if (unlikely(__Pyx_copy_spec_to_module(spec, moddict, "submodule_search_locations", "__path__", 0) < 0)) goto bad; + return module; +bad: + Py_XDECREF(module); + return NULL; +} + + +static CYTHON_SMALL_CODE int __pyx_pymod_exec_box_intersection(PyObject *__pyx_pyinit_module) +#endif +#endif +{ + int stringtab_initialized = 0; + #if CYTHON_USE_MODULE_STATE + int pystate_addmodule_run = 0; + #endif + PyObject *__pyx_t_1 = NULL; + PyObject *__pyx_t_2 = NULL; + PyObject *__pyx_t_3 = NULL; + PyObject *__pyx_t_4 = NULL; + PyObject *__pyx_t_5 = NULL; + int __pyx_t_6; + PyObject *__pyx_t_7 = NULL; + static PyThread_type_lock __pyx_t_8[8]; + int __pyx_t_9; + int __pyx_lineno = 0; + const char *__pyx_filename = NULL; + int __pyx_clineno = 0; + __Pyx_RefNannyDeclarations + #if CYTHON_PEP489_MULTI_PHASE_INIT + if (__pyx_m) { + if (__pyx_m == __pyx_pyinit_module) return 0; + PyErr_SetString(PyExc_RuntimeError, "Module 'box_intersection' has already been imported. 
Re-initialisation is not supported."); + return -1; + } + #elif PY_MAJOR_VERSION >= 3 + if (__pyx_m) return __Pyx_NewRef(__pyx_m); + #endif + /*--- Module creation code ---*/ + #if CYTHON_PEP489_MULTI_PHASE_INIT + __pyx_m = __pyx_pyinit_module; + Py_INCREF(__pyx_m); + #else + #if PY_MAJOR_VERSION < 3 + __pyx_m = Py_InitModule4("box_intersection", __pyx_methods, 0, 0, PYTHON_API_VERSION); Py_XINCREF(__pyx_m); + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #elif CYTHON_USE_MODULE_STATE + __pyx_t_1 = PyModule_Create(&__pyx_moduledef); if (unlikely(!__pyx_t_1)) __PYX_ERR(0, 1, __pyx_L1_error) + { + int add_module_result = PyState_AddModule(__pyx_t_1, &__pyx_moduledef); + __pyx_t_1 = 0; /* transfer ownership from __pyx_t_1 to box_intersection pseudovariable */ + if (unlikely((add_module_result < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + pystate_addmodule_run = 1; + } + #else + __pyx_m = PyModule_Create(&__pyx_moduledef); + if (unlikely(!__pyx_m)) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #endif + CYTHON_UNUSED_VAR(__pyx_t_1); + __pyx_d = PyModule_GetDict(__pyx_m); if (unlikely(!__pyx_d)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_d); + __pyx_b = PyImport_AddModule(__Pyx_BUILTIN_MODULE_NAME); if (unlikely(!__pyx_b)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_b); + __pyx_cython_runtime = PyImport_AddModule((char *) "cython_runtime"); if (unlikely(!__pyx_cython_runtime)) __PYX_ERR(0, 1, __pyx_L1_error) + Py_INCREF(__pyx_cython_runtime); + if (PyObject_SetAttrString(__pyx_m, "__builtins__", __pyx_b) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if CYTHON_REFNANNY +__Pyx_RefNanny = __Pyx_RefNannyImportAPI("refnanny"); +if (!__Pyx_RefNanny) { + PyErr_Clear(); + __Pyx_RefNanny = __Pyx_RefNannyImportAPI("Cython.Runtime.refnanny"); + if (!__Pyx_RefNanny) + Py_FatalError("failed to import 'refnanny' module"); +} +#endif + __Pyx_RefNannySetupContext("__Pyx_PyMODINIT_FUNC PyInit_box_intersection(void)", 0); + if (__Pyx_check_binary_version() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pxy_PyFrame_Initialize_Offsets + __Pxy_PyFrame_Initialize_Offsets(); + #endif + __pyx_empty_tuple = PyTuple_New(0); if (unlikely(!__pyx_empty_tuple)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_bytes = PyBytes_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_bytes)) __PYX_ERR(0, 1, __pyx_L1_error) + __pyx_empty_unicode = PyUnicode_FromStringAndSize("", 0); if (unlikely(!__pyx_empty_unicode)) __PYX_ERR(0, 1, __pyx_L1_error) + #ifdef __Pyx_CyFunction_USED + if (__pyx_CyFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_FusedFunction_USED + if (__pyx_FusedFunction_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Coroutine_USED + if (__pyx_Coroutine_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_Generator_USED + if (__pyx_Generator_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_AsyncGen_USED + if (__pyx_AsyncGen_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + #ifdef __Pyx_StopAsyncIteration_USED + if (__pyx_StopAsyncIteration_init(__pyx_m) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + /*--- Library function declarations ---*/ + /*--- Threads initialization code ---*/ + #if defined(WITH_THREAD) && PY_VERSION_HEX < 0x030700F0 && defined(__PYX_FORCE_INIT_THREADS) && __PYX_FORCE_INIT_THREADS + PyEval_InitThreads(); + #endif + /*--- Initialize various global constants etc. 
---*/ + if (__Pyx_InitConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + stringtab_initialized = 1; + if (__Pyx_InitGlobals() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #if PY_MAJOR_VERSION < 3 && (__PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT) + if (__Pyx_init_sys_getdefaultencoding_params() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + if (__pyx_module_is_main_box_intersection) { + if (PyObject_SetAttr(__pyx_m, __pyx_n_s_name_2, __pyx_n_s_main) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + } + #if PY_MAJOR_VERSION >= 3 + { + PyObject *modules = PyImport_GetModuleDict(); if (unlikely(!modules)) __PYX_ERR(0, 1, __pyx_L1_error) + if (!PyDict_GetItemString(modules, "box_intersection")) { + if (unlikely((PyDict_SetItemString(modules, "box_intersection", __pyx_m) < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + } + } + #endif + /*--- Builtin init code ---*/ + if (__Pyx_InitCachedBuiltins() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Constants init code ---*/ + if (__Pyx_InitCachedConstants() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + /*--- Global type/function init code ---*/ + (void)__Pyx_modinit_global_init_code(); + (void)__Pyx_modinit_variable_export_code(); + (void)__Pyx_modinit_function_export_code(); + if (unlikely((__Pyx_modinit_type_init_code() < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + if (unlikely((__Pyx_modinit_type_import_code() < 0))) __PYX_ERR(0, 1, __pyx_L1_error) + (void)__Pyx_modinit_variable_import_code(); + (void)__Pyx_modinit_function_import_code(); + /*--- Execution code ---*/ + #if defined(__Pyx_Generator_USED) || defined(__Pyx_Coroutine_USED) + if (__Pyx_patch_abc() < 0) __PYX_ERR(0, 1, __pyx_L1_error) + #endif + + /* "View.MemoryView":99 + * + * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" + * try: # <<<<<<<<<<<<<< + * if __import__("sys").version_info >= (3, 3): + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "View.MemoryView":100 + * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" + * try: + * if __import__("sys").version_info >= (3, 3): # <<<<<<<<<<<<<< + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + * else: + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin___import__, __pyx_tuple__11, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 100, __pyx_L2_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_version_info); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 100, __pyx_L2_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = PyObject_RichCompare(__pyx_t_5, __pyx_tuple__12, Py_GE); __Pyx_XGOTREF(__pyx_t_4); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 100, __pyx_L2_error) + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_t_4); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(1, 100, __pyx_L2_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + if (__pyx_t_6) { + + /* "View.MemoryView":101 + * try: + * if __import__("sys").version_info >= (3, 3): + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence # <<<<<<<<<<<<<< + * else: + * __pyx_collections_abc_Sequence = __import__("collections").Sequence + */ + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin___import__, __pyx_tuple__13, NULL); if 
(unlikely(!__pyx_t_4)) __PYX_ERR(1, 101, __pyx_L2_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_abc); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 101, __pyx_L2_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_5, __pyx_n_s_Sequence); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 101, __pyx_L2_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XGOTREF(__pyx_collections_abc_Sequence); + __Pyx_DECREF_SET(__pyx_collections_abc_Sequence, __pyx_t_4); + __Pyx_GIVEREF(__pyx_t_4); + __pyx_t_4 = 0; + + /* "View.MemoryView":100 + * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" + * try: + * if __import__("sys").version_info >= (3, 3): # <<<<<<<<<<<<<< + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + * else: + */ + goto __pyx_L8; + } + + /* "View.MemoryView":103 + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + * else: + * __pyx_collections_abc_Sequence = __import__("collections").Sequence # <<<<<<<<<<<<<< + * except: + * + */ + /*else*/ { + __pyx_t_4 = __Pyx_PyObject_Call(__pyx_builtin___import__, __pyx_tuple__14, NULL); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 103, __pyx_L2_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_5 = __Pyx_PyObject_GetAttrStr(__pyx_t_4, __pyx_n_s_Sequence); if (unlikely(!__pyx_t_5)) __PYX_ERR(1, 103, __pyx_L2_error) + __Pyx_GOTREF(__pyx_t_5); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XGOTREF(__pyx_collections_abc_Sequence); + __Pyx_DECREF_SET(__pyx_collections_abc_Sequence, __pyx_t_5); + __Pyx_GIVEREF(__pyx_t_5); + __pyx_t_5 = 0; + } + __pyx_L8:; + + /* "View.MemoryView":99 + * + * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" + * try: # <<<<<<<<<<<<<< + * if __import__("sys").version_info >= (3, 3): + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L7_try_end; + __pyx_L2_error:; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + + /* "View.MemoryView":104 + * else: + * __pyx_collections_abc_Sequence = __import__("collections").Sequence + * except: # <<<<<<<<<<<<<< + * + * __pyx_collections_abc_Sequence = None + */ + /*except:*/ { + __Pyx_AddTraceback("View.MemoryView", __pyx_clineno, __pyx_lineno, __pyx_filename); + if (__Pyx_GetException(&__pyx_t_5, &__pyx_t_4, &__pyx_t_7) < 0) __PYX_ERR(1, 104, __pyx_L4_except_error) + __Pyx_XGOTREF(__pyx_t_5); + __Pyx_XGOTREF(__pyx_t_4); + __Pyx_XGOTREF(__pyx_t_7); + + /* "View.MemoryView":106 + * except: + * + * __pyx_collections_abc_Sequence = None # <<<<<<<<<<<<<< + * + * + */ + __Pyx_INCREF(Py_None); + __Pyx_XGOTREF(__pyx_collections_abc_Sequence); + __Pyx_DECREF_SET(__pyx_collections_abc_Sequence, Py_None); + __Pyx_GIVEREF(Py_None); + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + goto __pyx_L3_exception_handled; + } + + /* "View.MemoryView":99 + * + * cdef object __pyx_collections_abc_Sequence "__pyx_collections_abc_Sequence" + * try: # <<<<<<<<<<<<<< + * if __import__("sys").version_info >= (3, 3): + * __pyx_collections_abc_Sequence = __import__("collections.abc").abc.Sequence + */ + __pyx_L4_except_error:; + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + 
__Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + goto __pyx_L1_error; + __pyx_L3_exception_handled:; + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + __pyx_L7_try_end:; + } + + /* "View.MemoryView":241 + * + * + * try: # <<<<<<<<<<<<<< + * count = __pyx_collections_abc_Sequence.count + * index = __pyx_collections_abc_Sequence.index + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_1); + /*try:*/ { + + /* "View.MemoryView":242 + * + * try: + * count = __pyx_collections_abc_Sequence.count # <<<<<<<<<<<<<< + * index = __pyx_collections_abc_Sequence.index + * except: + */ + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_count); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 242, __pyx_L11_error) + __Pyx_GOTREF(__pyx_t_7); + if (__Pyx_SetItemOnTypeDict(__pyx_array_type, __pyx_n_s_count, __pyx_t_7) < 0) __PYX_ERR(1, 242, __pyx_L11_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + PyType_Modified(__pyx_array_type); + + /* "View.MemoryView":243 + * try: + * count = __pyx_collections_abc_Sequence.count + * index = __pyx_collections_abc_Sequence.index # <<<<<<<<<<<<<< + * except: + * pass + */ + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_index); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 243, __pyx_L11_error) + __Pyx_GOTREF(__pyx_t_7); + if (__Pyx_SetItemOnTypeDict(__pyx_array_type, __pyx_n_s_index, __pyx_t_7) < 0) __PYX_ERR(1, 243, __pyx_L11_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + PyType_Modified(__pyx_array_type); + + /* "View.MemoryView":241 + * + * + * try: # <<<<<<<<<<<<<< + * count = __pyx_collections_abc_Sequence.count + * index = __pyx_collections_abc_Sequence.index + */ + } + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + goto __pyx_L16_try_end; + __pyx_L11_error:; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":244 + * count = __pyx_collections_abc_Sequence.count + * index = __pyx_collections_abc_Sequence.index + * except: # <<<<<<<<<<<<<< + * pass + * + */ + /*except:*/ { + __Pyx_ErrRestore(0,0,0); + goto __pyx_L12_exception_handled; + } + __pyx_L12_exception_handled:; + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1); + __pyx_L16_try_end:; + } + + /* "View.MemoryView":309 + * return self.name + * + * cdef generic = Enum("") # <<<<<<<<<<<<<< + * cdef strided = Enum("") # default + * cdef indirect = Enum("") + */ + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__15, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 309, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_XGOTREF(generic); + __Pyx_DECREF_SET(generic, __pyx_t_7); + __Pyx_GIVEREF(__pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":310 + * + * cdef generic = Enum("") + * cdef strided = Enum("") # default # <<<<<<<<<<<<<< + * cdef indirect = Enum("") + * + */ + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__16, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 310, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_XGOTREF(strided); + 
__Pyx_DECREF_SET(strided, __pyx_t_7); + __Pyx_GIVEREF(__pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":311 + * cdef generic = Enum("") + * cdef strided = Enum("") # default + * cdef indirect = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__17, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 311, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_XGOTREF(indirect); + __Pyx_DECREF_SET(indirect, __pyx_t_7); + __Pyx_GIVEREF(__pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":314 + * + * + * cdef contiguous = Enum("") # <<<<<<<<<<<<<< + * cdef indirect_contiguous = Enum("") + * + */ + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__18, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 314, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_XGOTREF(contiguous); + __Pyx_DECREF_SET(contiguous, __pyx_t_7); + __Pyx_GIVEREF(__pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":315 + * + * cdef contiguous = Enum("") + * cdef indirect_contiguous = Enum("") # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_7 = __Pyx_PyObject_Call(((PyObject *)__pyx_MemviewEnum_type), __pyx_tuple__19, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 315, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_XGOTREF(indirect_contiguous); + __Pyx_DECREF_SET(indirect_contiguous, __pyx_t_7); + __Pyx_GIVEREF(__pyx_t_7); + __pyx_t_7 = 0; + + /* "View.MemoryView":323 + * + * + * cdef int __pyx_memoryview_thread_locks_used = 0 # <<<<<<<<<<<<<< + * cdef PyThread_type_lock[8] __pyx_memoryview_thread_locks = [ + * PyThread_allocate_lock(), + */ + __pyx_memoryview_thread_locks_used = 0; + + /* "View.MemoryView":324 + * + * cdef int __pyx_memoryview_thread_locks_used = 0 + * cdef PyThread_type_lock[8] __pyx_memoryview_thread_locks = [ # <<<<<<<<<<<<<< + * PyThread_allocate_lock(), + * PyThread_allocate_lock(), + */ + __pyx_t_8[0] = PyThread_allocate_lock(); + __pyx_t_8[1] = PyThread_allocate_lock(); + __pyx_t_8[2] = PyThread_allocate_lock(); + __pyx_t_8[3] = PyThread_allocate_lock(); + __pyx_t_8[4] = PyThread_allocate_lock(); + __pyx_t_8[5] = PyThread_allocate_lock(); + __pyx_t_8[6] = PyThread_allocate_lock(); + __pyx_t_8[7] = PyThread_allocate_lock(); + memcpy(&(__pyx_memoryview_thread_locks[0]), __pyx_t_8, sizeof(__pyx_memoryview_thread_locks[0]) * (8)); + + /* "View.MemoryView":982 + * + * + * try: # <<<<<<<<<<<<<< + * count = __pyx_collections_abc_Sequence.count + * index = __pyx_collections_abc_Sequence.index + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_1, &__pyx_t_2, &__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_3); + /*try:*/ { + + /* "View.MemoryView":983 + * + * try: + * count = __pyx_collections_abc_Sequence.count # <<<<<<<<<<<<<< + * index = __pyx_collections_abc_Sequence.index + * except: + */ + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_count); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 983, __pyx_L17_error) + __Pyx_GOTREF(__pyx_t_7); + if (__Pyx_SetItemOnTypeDict(__pyx_memoryviewslice_type, __pyx_n_s_count, __pyx_t_7) < 0) __PYX_ERR(1, 983, __pyx_L17_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + PyType_Modified(__pyx_memoryviewslice_type); + + /* "View.MemoryView":984 + * try: + * count = __pyx_collections_abc_Sequence.count + * index = __pyx_collections_abc_Sequence.index # <<<<<<<<<<<<<< + * except: + * pass + */ + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, 
__pyx_n_s_index); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 984, __pyx_L17_error) + __Pyx_GOTREF(__pyx_t_7); + if (__Pyx_SetItemOnTypeDict(__pyx_memoryviewslice_type, __pyx_n_s_index, __pyx_t_7) < 0) __PYX_ERR(1, 984, __pyx_L17_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + PyType_Modified(__pyx_memoryviewslice_type); + + /* "View.MemoryView":982 + * + * + * try: # <<<<<<<<<<<<<< + * count = __pyx_collections_abc_Sequence.count + * index = __pyx_collections_abc_Sequence.index + */ + } + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + goto __pyx_L22_try_end; + __pyx_L17_error:; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":985 + * count = __pyx_collections_abc_Sequence.count + * index = __pyx_collections_abc_Sequence.index + * except: # <<<<<<<<<<<<<< + * pass + * + */ + /*except:*/ { + __Pyx_ErrRestore(0,0,0); + goto __pyx_L18_exception_handled; + } + __pyx_L18_exception_handled:; + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_ExceptionReset(__pyx_t_1, __pyx_t_2, __pyx_t_3); + __pyx_L22_try_end:; + } + + /* "View.MemoryView":988 + * pass + * + * try: # <<<<<<<<<<<<<< + * if __pyx_collections_abc_Sequence: + * + */ + { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ExceptionSave(&__pyx_t_3, &__pyx_t_2, &__pyx_t_1); + __Pyx_XGOTREF(__pyx_t_3); + __Pyx_XGOTREF(__pyx_t_2); + __Pyx_XGOTREF(__pyx_t_1); + /*try:*/ { + + /* "View.MemoryView":989 + * + * try: + * if __pyx_collections_abc_Sequence: # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_6 = __Pyx_PyObject_IsTrue(__pyx_collections_abc_Sequence); if (unlikely((__pyx_t_6 < 0))) __PYX_ERR(1, 989, __pyx_L23_error) + if (__pyx_t_6) { + + /* "View.MemoryView":993 + * + * + * __pyx_collections_abc_Sequence.register(_memoryviewslice) # <<<<<<<<<<<<<< + * __pyx_collections_abc_Sequence.register(array) + * except: + */ + __pyx_t_7 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_register); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 993, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_4 = __Pyx_PyObject_CallOneArg(__pyx_t_7, ((PyObject *)__pyx_memoryviewslice_type)); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 993, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "View.MemoryView":994 + * + * __pyx_collections_abc_Sequence.register(_memoryviewslice) + * __pyx_collections_abc_Sequence.register(array) # <<<<<<<<<<<<<< + * except: + * pass # ignore failure, it's a minor issue + */ + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_collections_abc_Sequence, __pyx_n_s_register); if (unlikely(!__pyx_t_4)) __PYX_ERR(1, 994, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_4); + __pyx_t_7 = __Pyx_PyObject_CallOneArg(__pyx_t_4, ((PyObject *)__pyx_array_type)); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 994, __pyx_L23_error) + __Pyx_GOTREF(__pyx_t_7); + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":989 + * + * try: + * if __pyx_collections_abc_Sequence: # <<<<<<<<<<<<<< + * + * + */ + } + + /* "View.MemoryView":988 + * pass + * + * try: # <<<<<<<<<<<<<< + * if __pyx_collections_abc_Sequence: + * + */ + } + __Pyx_XDECREF(__pyx_t_3); __pyx_t_3 = 0; + __Pyx_XDECREF(__pyx_t_2); __pyx_t_2 = 0; + __Pyx_XDECREF(__pyx_t_1); __pyx_t_1 = 0; + goto __pyx_L28_try_end; + 
__pyx_L23_error:; + __Pyx_XDECREF(__pyx_t_4); __pyx_t_4 = 0; + __Pyx_XDECREF(__pyx_t_5); __pyx_t_5 = 0; + __Pyx_XDECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "View.MemoryView":995 + * __pyx_collections_abc_Sequence.register(_memoryviewslice) + * __pyx_collections_abc_Sequence.register(array) + * except: # <<<<<<<<<<<<<< + * pass # ignore failure, it's a minor issue + * + */ + /*except:*/ { + __Pyx_ErrRestore(0,0,0); + goto __pyx_L24_exception_handled; + } + __pyx_L24_exception_handled:; + __Pyx_XGIVEREF(__pyx_t_3); + __Pyx_XGIVEREF(__pyx_t_2); + __Pyx_XGIVEREF(__pyx_t_1); + __Pyx_ExceptionReset(__pyx_t_3, __pyx_t_2, __pyx_t_1); + __pyx_L28_try_end:; + } + + /* "(tree fragment)":1 + * def __pyx_unpickle_Enum(__pyx_type, long __pyx_checksum, __pyx_state): # <<<<<<<<<<<<<< + * cdef object __pyx_PickleError + * cdef object __pyx_result + */ + __pyx_t_7 = PyCFunction_NewEx(&__pyx_mdef_15View_dot_MemoryView_1__pyx_unpickle_Enum, NULL, __pyx_n_s_View_MemoryView); if (unlikely(!__pyx_t_7)) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_pyx_unpickle_Enum, __pyx_t_7) < 0) __PYX_ERR(1, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "box_intersection.pyx":2 + * # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved + * import numpy as np # <<<<<<<<<<<<<< + * cimport numpy as np + * cimport cython + */ + __pyx_t_7 = __Pyx_ImportDottedModule(__pyx_n_s_numpy, NULL); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_np, __pyx_t_7) < 0) __PYX_ERR(0, 2, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + + /* "box_intersection.pyx":5 + * cimport numpy as np + * cimport cython + * cdef bint boolean_variable = True # <<<<<<<<<<<<<< + * np.import_array() + * + */ + __pyx_v_16box_intersection_boolean_variable = 1; + + /* "box_intersection.pyx":6 + * cimport cython + * cdef bint boolean_variable = True + * np.import_array() # <<<<<<<<<<<<<< + * + * + */ + __pyx_t_9 = __pyx_f_5numpy_import_array(); if (unlikely(__pyx_t_9 == ((int)-1))) __PYX_ERR(0, 6, __pyx_L1_error) + + /* "box_intersection.pyx":9 + * + * + * FLOAT = np.float32 # <<<<<<<<<<<<<< + * + * @cython.boundscheck(False) + */ + __Pyx_GetModuleGlobalName(__pyx_t_7, __pyx_n_s_np); if (unlikely(!__pyx_t_7)) __PYX_ERR(0, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_7); + __pyx_t_4 = __Pyx_PyObject_GetAttrStr(__pyx_t_7, __pyx_n_s_float32); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 9, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + __Pyx_DECREF(__pyx_t_7); __pyx_t_7 = 0; + if (PyDict_SetItem(__pyx_d, __pyx_n_s_FLOAT, __pyx_t_4) < 0) __PYX_ERR(0, 9, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "box_intersection.pyx":11 + * FLOAT = np.float32 + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def computeIntersection(cp1, cp2, s, e): + */ + __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_16box_intersection_1computeIntersection, 0, __pyx_n_s_computeIntersection, NULL, __pyx_n_s_box_intersection, __pyx_d, ((PyObject *)__pyx_codeobj__23)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_computeIntersection, __pyx_t_4) < 0) __PYX_ERR(0, 11, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "box_intersection.pyx":26 + * return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0]) + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * def 
polygon_clip_unnest(float [:, :] subjectPolygon, float [:, :] clipPolygon): + * """ Clip a polygon with another polygon. + */ + __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_16box_intersection_3polygon_clip_unnest, 0, __pyx_n_s_polygon_clip_unnest, NULL, __pyx_n_s_box_intersection, __pyx_d, ((PyObject *)__pyx_codeobj__25)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_polygon_clip_unnest, __pyx_t_4) < 0) __PYX_ERR(0, 26, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "box_intersection.pyx":115 + * return num_points + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def polygon_clip_float(float [:, :] subjectPolygon, float [:, :] clipPolygon): + */ + __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_16box_intersection_5polygon_clip_float, 0, __pyx_n_s_polygon_clip_float, NULL, __pyx_n_s_box_intersection, __pyx_d, ((PyObject *)__pyx_codeobj__27)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_polygon_clip_float, __pyx_t_4) < 0) __PYX_ERR(0, 115, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "box_intersection.pyx":164 + * + * + * @cython.boundscheck(False) # <<<<<<<<<<<<<< + * @cython.wraparound(False) + * def box_intersection(float [:, :, :, :] rect1, + */ + __pyx_t_4 = __Pyx_CyFunction_New(&__pyx_mdef_16box_intersection_7box_intersection, 0, __pyx_n_s_box_intersection, NULL, __pyx_n_s_box_intersection, __pyx_d, ((PyObject *)__pyx_codeobj__29)); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 164, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_box_intersection, __pyx_t_4) < 0) __PYX_ERR(0, 164, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /* "box_intersection.pyx":1 + * # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved # <<<<<<<<<<<<<< + * import numpy as np + * cimport numpy as np + */ + __pyx_t_4 = __Pyx_PyDict_NewPresized(0); if (unlikely(!__pyx_t_4)) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_GOTREF(__pyx_t_4); + if (PyDict_SetItem(__pyx_d, __pyx_n_s_test, __pyx_t_4) < 0) __PYX_ERR(0, 1, __pyx_L1_error) + __Pyx_DECREF(__pyx_t_4); __pyx_t_4 = 0; + + /*--- Wrapped vars code ---*/ + + goto __pyx_L0; + __pyx_L1_error:; + __Pyx_XDECREF(__pyx_t_4); + __Pyx_XDECREF(__pyx_t_5); + __Pyx_XDECREF(__pyx_t_7); + if (__pyx_m) { + if (__pyx_d && stringtab_initialized) { + __Pyx_AddTraceback("init box_intersection", __pyx_clineno, __pyx_lineno, __pyx_filename); + } + #if !CYTHON_USE_MODULE_STATE + Py_CLEAR(__pyx_m); + #else + Py_DECREF(__pyx_m); + if (pystate_addmodule_run) { + PyObject *tp, *value, *tb; + PyErr_Fetch(&tp, &value, &tb); + PyState_RemoveModule(&__pyx_moduledef); + PyErr_Restore(tp, value, tb); + } + #endif + } else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_ImportError, "init box_intersection"); + } + __pyx_L0:; + __Pyx_RefNannyFinishContext(); + #if CYTHON_PEP489_MULTI_PHASE_INIT + return (__pyx_m != NULL) ? 
0 : -1; + #elif PY_MAJOR_VERSION >= 3 + return __pyx_m; + #else + return; + #endif +} +/* #### Code section: cleanup_globals ### */ +/* #### Code section: cleanup_module ### */ +/* #### Code section: main_method ### */ +/* #### Code section: utility_code_pragmas ### */ +#ifdef _MSC_VER +#pragma warning( push ) +/* Warning 4127: conditional expression is constant + * Cython uses constant conditional expressions to allow in inline functions to be optimized at + * compile-time, so this warning is not useful + */ +#pragma warning( disable : 4127 ) +#endif + + + +/* #### Code section: utility_code_def ### */ + +/* --- Runtime support code --- */ +/* Refnanny */ +#if CYTHON_REFNANNY +static __Pyx_RefNannyAPIStruct *__Pyx_RefNannyImportAPI(const char *modname) { + PyObject *m = NULL, *p = NULL; + void *r = NULL; + m = PyImport_ImportModule(modname); + if (!m) goto end; + p = PyObject_GetAttrString(m, "RefNannyAPI"); + if (!p) goto end; + r = PyLong_AsVoidPtr(p); +end: + Py_XDECREF(p); + Py_XDECREF(m); + return (__Pyx_RefNannyAPIStruct *)r; +} +#endif + +/* PyErrExceptionMatches */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx_PyErr_ExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + if (__Pyx_PyErr_GivenExceptionMatches(exc_type, PyTuple_GET_ITEM(tuple, i))) return 1; + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_ExceptionMatchesInState(PyThreadState* tstate, PyObject* err) { + int result; + PyObject *exc_type; +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject *current_exception = tstate->current_exception; + if (unlikely(!current_exception)) return 0; + exc_type = (PyObject*) Py_TYPE(current_exception); + if (exc_type == err) return 1; +#else + exc_type = tstate->curexc_type; + if (exc_type == err) return 1; + if (unlikely(!exc_type)) return 0; +#endif + #if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(exc_type); + #endif + if (unlikely(PyTuple_Check(err))) { + result = __Pyx_PyErr_ExceptionMatchesTuple(exc_type, err); + } else { + result = __Pyx_PyErr_GivenExceptionMatches(exc_type, err); + } + #if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(exc_type); + #endif + return result; +} +#endif + +/* PyErrFetchRestore */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx_ErrRestoreInState(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject *tmp_value; + assert(type == NULL || (value != NULL && type == (PyObject*) Py_TYPE(value))); + if (value) { + #if CYTHON_COMPILING_IN_CPYTHON + if (unlikely(((PyBaseExceptionObject*) value)->traceback != tb)) + #endif + PyException_SetTraceback(value, tb); + } + tmp_value = tstate->current_exception; + tstate->current_exception = value; + Py_XDECREF(tmp_value); +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + tmp_type = tstate->curexc_type; + tmp_value = tstate->curexc_value; + tmp_tb = tstate->curexc_traceback; + tstate->curexc_type = type; + tstate->curexc_value = value; + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#endif +} +static CYTHON_INLINE void __Pyx_ErrFetchInState(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { +#if PY_VERSION_HEX >= 0x030C00A6 + PyObject* exc_value; + exc_value = tstate->current_exception; + tstate->current_exception = 0; + *value = exc_value; + *type = NULL; + *tb = NULL; + if (exc_value) { + *type = (PyObject*) Py_TYPE(exc_value); + Py_INCREF(*type); + #if CYTHON_COMPILING_IN_CPYTHON + *tb = ((PyBaseExceptionObject*) exc_value)->traceback; + Py_XINCREF(*tb); + #else + *tb = PyException_GetTraceback(exc_value); + #endif + } +#else + *type = tstate->curexc_type; + *value = tstate->curexc_value; + *tb = tstate->curexc_traceback; + 
tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; +#endif +} +#endif + +/* PyObjectGetAttrStr */ +#if CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStr(PyObject* obj, PyObject* attr_name) { + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro)) + return tp->tp_getattro(obj, attr_name); +#if PY_MAJOR_VERSION < 3 + if (likely(tp->tp_getattr)) + return tp->tp_getattr(obj, PyString_AS_STRING(attr_name)); +#endif + return PyObject_GetAttr(obj, attr_name); +} +#endif + +/* PyObjectGetAttrStrNoError */ +static void __Pyx_PyObject_GetAttrStr_ClearAttributeError(void) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (likely(__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + __Pyx_PyErr_Clear(); +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GetAttrStrNoError(PyObject* obj, PyObject* attr_name) { + PyObject *result; +#if CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_TYPE_SLOTS && PY_VERSION_HEX >= 0x030700B1 + PyTypeObject* tp = Py_TYPE(obj); + if (likely(tp->tp_getattro == PyObject_GenericGetAttr)) { + return _PyObject_GenericGetAttrWithDict(obj, attr_name, NULL, 1); + } +#endif + result = __Pyx_PyObject_GetAttrStr(obj, attr_name); + if (unlikely(!result)) { + __Pyx_PyObject_GetAttrStr_ClearAttributeError(); + } + return result; +} + +/* GetBuiltinName */ +static PyObject *__Pyx_GetBuiltinName(PyObject *name) { + PyObject* result = __Pyx_PyObject_GetAttrStrNoError(__pyx_b, name); + if (unlikely(!result) && !PyErr_Occurred()) { + PyErr_Format(PyExc_NameError, +#if PY_MAJOR_VERSION >= 3 + "name '%U' is not defined", name); +#else + "name '%.200s' is not defined", PyString_AS_STRING(name)); +#endif + } + return result; +} + +/* TupleAndListFromArray */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE void __Pyx_copy_object_array(PyObject *const *CYTHON_RESTRICT src, PyObject** CYTHON_RESTRICT dest, Py_ssize_t length) { + PyObject *v; + Py_ssize_t i; + for (i = 0; i < length; i++) { + v = dest[i] = src[i]; + Py_INCREF(v); + } +} +static CYTHON_INLINE PyObject * +__Pyx_PyTuple_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + if (n <= 0) { + Py_INCREF(__pyx_empty_tuple); + return __pyx_empty_tuple; + } + res = PyTuple_New(n); + if (unlikely(res == NULL)) return NULL; + __Pyx_copy_object_array(src, ((PyTupleObject*)res)->ob_item, n); + return res; +} +static CYTHON_INLINE PyObject * +__Pyx_PyList_FromArray(PyObject *const *src, Py_ssize_t n) +{ + PyObject *res; + if (n <= 0) { + return PyList_New(0); + } + res = PyList_New(n); + if (unlikely(res == NULL)) return NULL; + __Pyx_copy_object_array(src, ((PyListObject*)res)->ob_item, n); + return res; +} +#endif + +/* BytesEquals */ +static CYTHON_INLINE int __Pyx_PyBytes_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + return PyObject_RichCompareBool(s1, s2, equals); +#else + if (s1 == s2) { + return (equals == Py_EQ); + } else if (PyBytes_CheckExact(s1) & PyBytes_CheckExact(s2)) { + const char *ps1, *ps2; + Py_ssize_t length = PyBytes_GET_SIZE(s1); + if (length != PyBytes_GET_SIZE(s2)) + return (equals == Py_NE); + ps1 = PyBytes_AS_STRING(s1); + ps2 = PyBytes_AS_STRING(s2); + if (ps1[0] != ps2[0]) { + return (equals == Py_NE); + } else if (length == 1) { + return (equals == Py_EQ); + } else { + int result; +#if CYTHON_USE_UNICODE_INTERNALS && (PY_VERSION_HEX < 0x030B0000) + Py_hash_t hash1, hash2; + hash1 = ((PyBytesObject*)s1)->ob_shash; + hash2 = 
((PyBytesObject*)s2)->ob_shash; + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + return (equals == Py_NE); + } +#endif + result = memcmp(ps1, ps2, (size_t)length); + return (equals == Py_EQ) ? (result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & PyBytes_CheckExact(s2)) { + return (equals == Py_NE); + } else if ((s2 == Py_None) & PyBytes_CheckExact(s1)) { + return (equals == Py_NE); + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +#endif +} + +/* UnicodeEquals */ +static CYTHON_INLINE int __Pyx_PyUnicode_Equals(PyObject* s1, PyObject* s2, int equals) { +#if CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API + return PyObject_RichCompareBool(s1, s2, equals); +#else +#if PY_MAJOR_VERSION < 3 + PyObject* owned_ref = NULL; +#endif + int s1_is_unicode, s2_is_unicode; + if (s1 == s2) { + goto return_eq; + } + s1_is_unicode = PyUnicode_CheckExact(s1); + s2_is_unicode = PyUnicode_CheckExact(s2); +#if PY_MAJOR_VERSION < 3 + if ((s1_is_unicode & (!s2_is_unicode)) && PyString_CheckExact(s2)) { + owned_ref = PyUnicode_FromObject(s2); + if (unlikely(!owned_ref)) + return -1; + s2 = owned_ref; + s2_is_unicode = 1; + } else if ((s2_is_unicode & (!s1_is_unicode)) && PyString_CheckExact(s1)) { + owned_ref = PyUnicode_FromObject(s1); + if (unlikely(!owned_ref)) + return -1; + s1 = owned_ref; + s1_is_unicode = 1; + } else if (((!s2_is_unicode) & (!s1_is_unicode))) { + return __Pyx_PyBytes_Equals(s1, s2, equals); + } +#endif + if (s1_is_unicode & s2_is_unicode) { + Py_ssize_t length; + int kind; + void *data1, *data2; + if (unlikely(__Pyx_PyUnicode_READY(s1) < 0) || unlikely(__Pyx_PyUnicode_READY(s2) < 0)) + return -1; + length = __Pyx_PyUnicode_GET_LENGTH(s1); + if (length != __Pyx_PyUnicode_GET_LENGTH(s2)) { + goto return_ne; + } +#if CYTHON_USE_UNICODE_INTERNALS + { + Py_hash_t hash1, hash2; + #if CYTHON_PEP393_ENABLED + hash1 = ((PyASCIIObject*)s1)->hash; + hash2 = ((PyASCIIObject*)s2)->hash; + #else + hash1 = ((PyUnicodeObject*)s1)->hash; + hash2 = ((PyUnicodeObject*)s2)->hash; + #endif + if (hash1 != hash2 && hash1 != -1 && hash2 != -1) { + goto return_ne; + } + } +#endif + kind = __Pyx_PyUnicode_KIND(s1); + if (kind != __Pyx_PyUnicode_KIND(s2)) { + goto return_ne; + } + data1 = __Pyx_PyUnicode_DATA(s1); + data2 = __Pyx_PyUnicode_DATA(s2); + if (__Pyx_PyUnicode_READ(kind, data1, 0) != __Pyx_PyUnicode_READ(kind, data2, 0)) { + goto return_ne; + } else if (length == 1) { + goto return_eq; + } else { + int result = memcmp(data1, data2, (size_t)(length * kind)); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ) ? 
(result == 0) : (result != 0); + } + } else if ((s1 == Py_None) & s2_is_unicode) { + goto return_ne; + } else if ((s2 == Py_None) & s1_is_unicode) { + goto return_ne; + } else { + int result; + PyObject* py_result = PyObject_RichCompare(s1, s2, equals); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + if (!py_result) + return -1; + result = __Pyx_PyObject_IsTrue(py_result); + Py_DECREF(py_result); + return result; + } +return_eq: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_EQ); +return_ne: + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(owned_ref); + #endif + return (equals == Py_NE); +#endif +} + +/* fastcall */ +#if CYTHON_METH_FASTCALL +static CYTHON_INLINE PyObject * __Pyx_GetKwValue_FASTCALL(PyObject *kwnames, PyObject *const *kwvalues, PyObject *s) +{ + Py_ssize_t i, n = PyTuple_GET_SIZE(kwnames); + for (i = 0; i < n; i++) + { + if (s == PyTuple_GET_ITEM(kwnames, i)) return kwvalues[i]; + } + for (i = 0; i < n; i++) + { + int eq = __Pyx_PyUnicode_Equals(s, PyTuple_GET_ITEM(kwnames, i), Py_EQ); + if (unlikely(eq != 0)) { + if (unlikely(eq < 0)) return NULL; // error + return kwvalues[i]; + } + } + return NULL; // not found (no exception set) +} +#endif + +/* RaiseArgTupleInvalid */ +static void __Pyx_RaiseArgtupleInvalid( + const char* func_name, + int exact, + Py_ssize_t num_min, + Py_ssize_t num_max, + Py_ssize_t num_found) +{ + Py_ssize_t num_expected; + const char *more_or_less; + if (num_found < num_min) { + num_expected = num_min; + more_or_less = "at least"; + } else { + num_expected = num_max; + more_or_less = "at most"; + } + if (exact) { + more_or_less = "exactly"; + } + PyErr_Format(PyExc_TypeError, + "%.200s() takes %.8s %" CYTHON_FORMAT_SSIZE_T "d positional argument%.1s (%" CYTHON_FORMAT_SSIZE_T "d given)", + func_name, more_or_less, num_expected, + (num_expected == 1) ? 
"" : "s", num_found); +} + +/* RaiseDoubleKeywords */ +static void __Pyx_RaiseDoubleKeywordsError( + const char* func_name, + PyObject* kw_name) +{ + PyErr_Format(PyExc_TypeError, + #if PY_MAJOR_VERSION >= 3 + "%s() got multiple values for keyword argument '%U'", func_name, kw_name); + #else + "%s() got multiple values for keyword argument '%s'", func_name, + PyString_AsString(kw_name)); + #endif +} + +/* ParseKeywords */ +static int __Pyx_ParseOptionalKeywords( + PyObject *kwds, + PyObject *const *kwvalues, + PyObject **argnames[], + PyObject *kwds2, + PyObject *values[], + Py_ssize_t num_pos_args, + const char* function_name) +{ + PyObject *key = 0, *value = 0; + Py_ssize_t pos = 0; + PyObject*** name; + PyObject*** first_kw_arg = argnames + num_pos_args; + int kwds_is_tuple = CYTHON_METH_FASTCALL && likely(PyTuple_Check(kwds)); + while (1) { + Py_XDECREF(key); key = NULL; + Py_XDECREF(value); value = NULL; + if (kwds_is_tuple) { + Py_ssize_t size; +#if CYTHON_ASSUME_SAFE_MACROS + size = PyTuple_GET_SIZE(kwds); +#else + size = PyTuple_Size(kwds); + if (size < 0) goto bad; +#endif + if (pos >= size) break; +#if CYTHON_AVOID_BORROWED_REFS + key = __Pyx_PySequence_ITEM(kwds, pos); + if (!key) goto bad; +#elif CYTHON_ASSUME_SAFE_MACROS + key = PyTuple_GET_ITEM(kwds, pos); +#else + key = PyTuple_GetItem(kwds, pos); + if (!key) goto bad; +#endif + value = kwvalues[pos]; + pos++; + } + else + { + if (!PyDict_Next(kwds, &pos, &key, &value)) break; +#if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(key); +#endif + } + name = first_kw_arg; + while (*name && (**name != key)) name++; + if (*name) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + Py_INCREF(value); // transfer ownership of value to values + Py_DECREF(key); +#endif + key = NULL; + value = NULL; + continue; + } +#if !CYTHON_AVOID_BORROWED_REFS + Py_INCREF(key); +#endif + Py_INCREF(value); + name = first_kw_arg; + #if PY_MAJOR_VERSION < 3 + if (likely(PyString_Check(key))) { + while (*name) { + if ((CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**name) == PyString_GET_SIZE(key)) + && _PyString_Eq(**name, key)) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + value = NULL; // ownership transferred to values +#endif + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + if ((**argname == key) || ( + (CYTHON_COMPILING_IN_PYPY || PyString_GET_SIZE(**argname) == PyString_GET_SIZE(key)) + && _PyString_Eq(**argname, key))) { + goto arg_passed_twice; + } + argname++; + } + } + } else + #endif + if (likely(PyUnicode_Check(key))) { + while (*name) { + int cmp = ( + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**name) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 1 : + #endif + PyUnicode_Compare(**name, key) + ); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) { + values[name-argnames] = value; +#if CYTHON_AVOID_BORROWED_REFS + value = NULL; // ownership transferred to values +#endif + break; + } + name++; + } + if (*name) continue; + else { + PyObject*** argname = argnames; + while (argname != first_kw_arg) { + int cmp = (**argname == key) ? 0 : + #if !CYTHON_COMPILING_IN_PYPY && PY_MAJOR_VERSION >= 3 + (__Pyx_PyUnicode_GET_LENGTH(**argname) != __Pyx_PyUnicode_GET_LENGTH(key)) ? 
1 : + #endif + PyUnicode_Compare(**argname, key); + if (cmp < 0 && unlikely(PyErr_Occurred())) goto bad; + if (cmp == 0) goto arg_passed_twice; + argname++; + } + } + } else + goto invalid_keyword_type; + if (kwds2) { + if (unlikely(PyDict_SetItem(kwds2, key, value))) goto bad; + } else { + goto invalid_keyword; + } + } + Py_XDECREF(key); + Py_XDECREF(value); + return 0; +arg_passed_twice: + __Pyx_RaiseDoubleKeywordsError(function_name, key); + goto bad; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + goto bad; +invalid_keyword: + #if PY_MAJOR_VERSION < 3 + PyErr_Format(PyExc_TypeError, + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif +bad: + Py_XDECREF(key); + Py_XDECREF(value); + return -1; +} + +/* ArgTypeTest */ +static int __Pyx__ArgTypeTest(PyObject *obj, PyTypeObject *type, const char *name, int exact) +{ + __Pyx_TypeName type_name; + __Pyx_TypeName obj_type_name; + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + else if (exact) { + #if PY_MAJOR_VERSION == 2 + if ((type == &PyBaseString_Type) && likely(__Pyx_PyBaseString_CheckExact(obj))) return 1; + #endif + } + else { + if (likely(__Pyx_TypeCheck(obj, type))) return 1; + } + type_name = __Pyx_PyType_GetName(type); + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "Argument '%.200s' has incorrect type (expected " __Pyx_FMT_TYPENAME + ", got " __Pyx_FMT_TYPENAME ")", name, type_name, obj_type_name); + __Pyx_DECREF_TypeName(type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return 0; +} + +/* RaiseException */ +#if PY_MAJOR_VERSION < 3 +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + __Pyx_PyThreadState_declare + CYTHON_UNUSED_VAR(cause); + Py_XINCREF(type); + if (!value || value == Py_None) + value = NULL; + else + Py_INCREF(value); + if (!tb || tb == Py_None) + tb = NULL; + else { + Py_INCREF(tb); + if (!PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto raise_error; + } + } + if (PyType_Check(type)) { +#if CYTHON_COMPILING_IN_PYPY + if (!value) { + Py_INCREF(Py_None); + value = Py_None; + } +#endif + PyErr_NormalizeException(&type, &value, &tb); + } else { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate value"); + goto raise_error; + } + value = type; + type = (PyObject*) Py_TYPE(type); + Py_INCREF(type); + if (!PyType_IsSubtype((PyTypeObject *)type, (PyTypeObject *)PyExc_BaseException)) { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto raise_error; + } + } + __Pyx_PyThreadState_assign + __Pyx_ErrRestore(type, value, tb); + return; +raise_error: + Py_XDECREF(value); + Py_XDECREF(type); + Py_XDECREF(tb); + return; +} +#else +static void __Pyx_Raise(PyObject *type, PyObject *value, PyObject *tb, PyObject *cause) { + PyObject* owned_instance = NULL; + if (tb == Py_None) { + tb = 0; + } else if (tb && !PyTraceBack_Check(tb)) { + PyErr_SetString(PyExc_TypeError, + "raise: arg 3 must be a traceback or None"); + goto bad; + } + if (value == Py_None) + value = 0; + if (PyExceptionInstance_Check(type)) { + if (value) { + PyErr_SetString(PyExc_TypeError, + "instance exception may not have a separate 
value"); + goto bad; + } + value = type; + type = (PyObject*) Py_TYPE(value); + } else if (PyExceptionClass_Check(type)) { + PyObject *instance_class = NULL; + if (value && PyExceptionInstance_Check(value)) { + instance_class = (PyObject*) Py_TYPE(value); + if (instance_class != type) { + int is_subclass = PyObject_IsSubclass(instance_class, type); + if (!is_subclass) { + instance_class = NULL; + } else if (unlikely(is_subclass == -1)) { + goto bad; + } else { + type = instance_class; + } + } + } + if (!instance_class) { + PyObject *args; + if (!value) + args = PyTuple_New(0); + else if (PyTuple_Check(value)) { + Py_INCREF(value); + args = value; + } else + args = PyTuple_Pack(1, value); + if (!args) + goto bad; + owned_instance = PyObject_Call(type, args, NULL); + Py_DECREF(args); + if (!owned_instance) + goto bad; + value = owned_instance; + if (!PyExceptionInstance_Check(value)) { + PyErr_Format(PyExc_TypeError, + "calling %R should have returned an instance of " + "BaseException, not %R", + type, Py_TYPE(value)); + goto bad; + } + } + } else { + PyErr_SetString(PyExc_TypeError, + "raise: exception class must be a subclass of BaseException"); + goto bad; + } + if (cause) { + PyObject *fixed_cause; + if (cause == Py_None) { + fixed_cause = NULL; + } else if (PyExceptionClass_Check(cause)) { + fixed_cause = PyObject_CallObject(cause, NULL); + if (fixed_cause == NULL) + goto bad; + } else if (PyExceptionInstance_Check(cause)) { + fixed_cause = cause; + Py_INCREF(fixed_cause); + } else { + PyErr_SetString(PyExc_TypeError, + "exception causes must derive from " + "BaseException"); + goto bad; + } + PyException_SetCause(value, fixed_cause); + } + PyErr_SetObject(type, value); + if (tb) { + #if PY_VERSION_HEX >= 0x030C00A6 + PyException_SetTraceback(value, tb); + #elif CYTHON_FAST_THREAD_STATE + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject* tmp_tb = tstate->curexc_traceback; + if (tb != tmp_tb) { + Py_INCREF(tb); + tstate->curexc_traceback = tb; + Py_XDECREF(tmp_tb); + } +#else + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_Fetch(&tmp_type, &tmp_value, &tmp_tb); + Py_INCREF(tb); + PyErr_Restore(tmp_type, tmp_value, tb); + Py_XDECREF(tmp_tb); +#endif + } +bad: + Py_XDECREF(owned_instance); + return; +} +#endif + +/* PyFunctionFastCall */ +#if CYTHON_FAST_PYCALL && !CYTHON_VECTORCALL +static PyObject* __Pyx_PyFunction_FastCallNoKw(PyCodeObject *co, PyObject **args, Py_ssize_t na, + PyObject *globals) { + PyFrameObject *f; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject **fastlocals; + Py_ssize_t i; + PyObject *result; + assert(globals != NULL); + /* XXX Perhaps we should create a specialized + PyFrame_New() that doesn't take locals, but does + take builtins without sanity checking them. 
+ */ + assert(tstate != NULL); + f = PyFrame_New(tstate, co, globals, NULL); + if (f == NULL) { + return NULL; + } + fastlocals = __Pyx_PyFrame_GetLocalsplus(f); + for (i = 0; i < na; i++) { + Py_INCREF(*args); + fastlocals[i] = *args++; + } + result = PyEval_EvalFrameEx(f,0); + ++tstate->recursion_depth; + Py_DECREF(f); + --tstate->recursion_depth; + return result; +} +static PyObject *__Pyx_PyFunction_FastCallDict(PyObject *func, PyObject **args, Py_ssize_t nargs, PyObject *kwargs) { + PyCodeObject *co = (PyCodeObject *)PyFunction_GET_CODE(func); + PyObject *globals = PyFunction_GET_GLOBALS(func); + PyObject *argdefs = PyFunction_GET_DEFAULTS(func); + PyObject *closure; +#if PY_MAJOR_VERSION >= 3 + PyObject *kwdefs; +#endif + PyObject *kwtuple, **k; + PyObject **d; + Py_ssize_t nd; + Py_ssize_t nk; + PyObject *result; + assert(kwargs == NULL || PyDict_Check(kwargs)); + nk = kwargs ? PyDict_Size(kwargs) : 0; + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) { + return NULL; + } + if ( +#if PY_MAJOR_VERSION >= 3 + co->co_kwonlyargcount == 0 && +#endif + likely(kwargs == NULL || nk == 0) && + co->co_flags == (CO_OPTIMIZED | CO_NEWLOCALS | CO_NOFREE)) { + if (argdefs == NULL && co->co_argcount == nargs) { + result = __Pyx_PyFunction_FastCallNoKw(co, args, nargs, globals); + goto done; + } + else if (nargs == 0 && argdefs != NULL + && co->co_argcount == Py_SIZE(argdefs)) { + /* function called with no arguments, but all parameters have + a default value: use default values as arguments .*/ + args = &PyTuple_GET_ITEM(argdefs, 0); + result =__Pyx_PyFunction_FastCallNoKw(co, args, Py_SIZE(argdefs), globals); + goto done; + } + } + if (kwargs != NULL) { + Py_ssize_t pos, i; + kwtuple = PyTuple_New(2 * nk); + if (kwtuple == NULL) { + result = NULL; + goto done; + } + k = &PyTuple_GET_ITEM(kwtuple, 0); + pos = i = 0; + while (PyDict_Next(kwargs, &pos, &k[i], &k[i+1])) { + Py_INCREF(k[i]); + Py_INCREF(k[i+1]); + i += 2; + } + nk = i / 2; + } + else { + kwtuple = NULL; + k = NULL; + } + closure = PyFunction_GET_CLOSURE(func); +#if PY_MAJOR_VERSION >= 3 + kwdefs = PyFunction_GET_KW_DEFAULTS(func); +#endif + if (argdefs != NULL) { + d = &PyTuple_GET_ITEM(argdefs, 0); + nd = Py_SIZE(argdefs); + } + else { + d = NULL; + nd = 0; + } +#if PY_MAJOR_VERSION >= 3 + result = PyEval_EvalCodeEx((PyObject*)co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, kwdefs, closure); +#else + result = PyEval_EvalCodeEx(co, globals, (PyObject *)NULL, + args, (int)nargs, + k, (int)nk, + d, (int)nd, closure); +#endif + Py_XDECREF(kwtuple); +done: + Py_LeaveRecursiveCall(); + return result; +} +#endif + +/* PyObjectCall */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *result; + ternaryfunc call = Py_TYPE(func)->tp_call; + if (unlikely(!call)) + return PyObject_Call(func, arg, kw); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = (*call)(func, arg, kw); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectCallMethO */ +#if CYTHON_COMPILING_IN_CPYTHON +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallMethO(PyObject *func, PyObject *arg) { + PyObject *self, *result; + PyCFunction cfunc; + cfunc = PyCFunction_GET_FUNCTION(func); + self = 
PyCFunction_GET_SELF(func); + if (unlikely(Py_EnterRecursiveCall((char*)" while calling a Python object"))) + return NULL; + result = cfunc(self, arg); + Py_LeaveRecursiveCall(); + if (unlikely(!result) && unlikely(!PyErr_Occurred())) { + PyErr_SetString( + PyExc_SystemError, + "NULL result without error in PyObject_Call"); + } + return result; +} +#endif + +/* PyObjectFastCall */ +static PyObject* __Pyx_PyObject_FastCall_fallback(PyObject *func, PyObject **args, size_t nargs, PyObject *kwargs) { + PyObject *argstuple; + PyObject *result = 0; + size_t i; + argstuple = PyTuple_New((Py_ssize_t)nargs); + if (unlikely(!argstuple)) return NULL; + for (i = 0; i < nargs; i++) { + Py_INCREF(args[i]); + if (__Pyx_PyTuple_SET_ITEM(argstuple, (Py_ssize_t)i, args[i]) < 0) goto bad; + } + result = __Pyx_PyObject_Call(func, argstuple, kwargs); + bad: + Py_DECREF(argstuple); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_FastCallDict(PyObject *func, PyObject **args, size_t _nargs, PyObject *kwargs) { + Py_ssize_t nargs = __Pyx_PyVectorcall_NARGS(_nargs); +#if CYTHON_COMPILING_IN_CPYTHON + if (nargs == 0 && kwargs == NULL) { +#if defined(__Pyx_CyFunction_USED) && defined(NDEBUG) + if (__Pyx_IsCyOrPyCFunction(func)) +#else + if (PyCFunction_Check(func)) +#endif + { + if (likely(PyCFunction_GET_FLAGS(func) & METH_NOARGS)) { + return __Pyx_PyObject_CallMethO(func, NULL); + } + } + } + else if (nargs == 1 && kwargs == NULL) { + if (PyCFunction_Check(func)) + { + if (likely(PyCFunction_GET_FLAGS(func) & METH_O)) { + return __Pyx_PyObject_CallMethO(func, args[0]); + } + } + } +#endif + #if PY_VERSION_HEX < 0x030800B1 + #if CYTHON_FAST_PYCCALL + if (PyCFunction_Check(func)) { + if (kwargs) { + return _PyCFunction_FastCallDict(func, args, nargs, kwargs); + } else { + return _PyCFunction_FastCallKeywords(func, args, nargs, NULL); + } + } + #if PY_VERSION_HEX >= 0x030700A1 + if (!kwargs && __Pyx_IS_TYPE(func, &PyMethodDescr_Type)) { + return _PyMethodDescr_FastCallKeywords(func, args, nargs, NULL); + } + #endif + #endif + #if CYTHON_FAST_PYCALL + if (PyFunction_Check(func)) { + return __Pyx_PyFunction_FastCallDict(func, args, nargs, kwargs); + } + #endif + #endif + #if CYTHON_VECTORCALL + #if Py_VERSION_HEX < 0x03090000 + vectorcallfunc f = _PyVectorcall_Function(func); + #else + vectorcallfunc f = PyVectorcall_Function(func); + #endif + if (f) { + return f(func, args, (size_t)nargs, kwargs); + } + #elif defined(__Pyx_CyFunction_USED) && CYTHON_BACKPORT_VECTORCALL + if (__Pyx_CyFunction_CheckExact(func)) { + __pyx_vectorcallfunc f = __Pyx_CyFunction_func_vectorcall(func); + if (f) return f(func, args, (size_t)nargs, kwargs); + } + #endif + if (nargs == 0) { + return __Pyx_PyObject_Call(func, __pyx_empty_tuple, kwargs); + } + return __Pyx_PyObject_FastCall_fallback(func, args, (size_t)nargs, kwargs); +} + +/* RaiseUnexpectedTypeError */ +static int +__Pyx_RaiseUnexpectedTypeError(const char *expected, PyObject *obj) +{ + __Pyx_TypeName obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, "Expected %s, got " __Pyx_FMT_TYPENAME, + expected, obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return 0; +} + +/* CIntToDigits */ +static const char DIGIT_PAIRS_10[2*10*10+1] = { + "00010203040506070809" + "10111213141516171819" + "20212223242526272829" + "30313233343536373839" + "40414243444546474849" + "50515253545556575859" + "60616263646566676869" + "70717273747576777879" + "80818283848586878889" + "90919293949596979899" +}; +static const char 
DIGIT_PAIRS_8[2*8*8+1] = { + "0001020304050607" + "1011121314151617" + "2021222324252627" + "3031323334353637" + "4041424344454647" + "5051525354555657" + "6061626364656667" + "7071727374757677" +}; +static const char DIGITS_HEX[2*16+1] = { + "0123456789abcdef" + "0123456789ABCDEF" +}; + +/* BuildPyUnicode */ +static PyObject* __Pyx_PyUnicode_BuildFromAscii(Py_ssize_t ulength, char* chars, int clength, + int prepend_sign, char padding_char) { + PyObject *uval; + Py_ssize_t uoffset = ulength - clength; +#if CYTHON_USE_UNICODE_INTERNALS + Py_ssize_t i; +#if CYTHON_PEP393_ENABLED + void *udata; + uval = PyUnicode_New(ulength, 127); + if (unlikely(!uval)) return NULL; + udata = PyUnicode_DATA(uval); +#else + Py_UNICODE *udata; + uval = PyUnicode_FromUnicode(NULL, ulength); + if (unlikely(!uval)) return NULL; + udata = PyUnicode_AS_UNICODE(uval); +#endif + if (uoffset > 0) { + i = 0; + if (prepend_sign) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, 0, '-'); + i++; + } + for (; i < uoffset; i++) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, i, padding_char); + } + } + for (i=0; i < clength; i++) { + __Pyx_PyUnicode_WRITE(PyUnicode_1BYTE_KIND, udata, uoffset+i, chars[i]); + } +#else + { + PyObject *sign = NULL, *padding = NULL; + uval = NULL; + if (uoffset > 0) { + prepend_sign = !!prepend_sign; + if (uoffset > prepend_sign) { + padding = PyUnicode_FromOrdinal(padding_char); + if (likely(padding) && uoffset > prepend_sign + 1) { + PyObject *tmp; + PyObject *repeat = PyInt_FromSsize_t(uoffset - prepend_sign); + if (unlikely(!repeat)) goto done_or_error; + tmp = PyNumber_Multiply(padding, repeat); + Py_DECREF(repeat); + Py_DECREF(padding); + padding = tmp; + } + if (unlikely(!padding)) goto done_or_error; + } + if (prepend_sign) { + sign = PyUnicode_FromOrdinal('-'); + if (unlikely(!sign)) goto done_or_error; + } + } + uval = PyUnicode_DecodeASCII(chars, clength, NULL); + if (likely(uval) && padding) { + PyObject *tmp = PyNumber_Add(padding, uval); + Py_DECREF(uval); + uval = tmp; + } + if (likely(uval) && sign) { + PyObject *tmp = PyNumber_Add(sign, uval); + Py_DECREF(uval); + uval = tmp; + } +done_or_error: + Py_XDECREF(padding); + Py_XDECREF(sign); + } +#endif + return uval; +} + +/* CIntToPyUnicode */ +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_int(int value, Py_ssize_t width, char padding_char, char format_char) { + char digits[sizeof(int)*3+2]; + char *dpos, *end = digits + sizeof(int)*3+2; + const char *hex_digits = DIGITS_HEX; + Py_ssize_t length, ulength; + int prepend_sign, last_one_off; + int remaining; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (format_char == 'X') { + hex_digits += 16; + format_char = 'x'; + } + remaining = value; + last_one_off = 0; + dpos = end; + do { + int digit_pos; + switch (format_char) { + case 'o': + digit_pos = abs((int)(remaining % (8*8))); + remaining = (int) (remaining / (8*8)); + dpos -= 2; + memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2); + last_one_off = (digit_pos < 8); + break; + case 'd': + digit_pos = abs((int)(remaining % (10*10))); + remaining = (int) (remaining / (10*10)); + dpos -= 2; + memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2); + last_one_off = (digit_pos < 10); + break; + case 'x': + *(--dpos) = hex_digits[abs((int)(remaining % 16))]; + remaining = (int) (remaining 
/ 16); + break; + default: + assert(0); + break; + } + } while (unlikely(remaining != 0)); + assert(!last_one_off || *dpos == '0'); + dpos += last_one_off; + length = end - dpos; + ulength = length; + prepend_sign = 0; + if (!is_unsigned && value <= neg_one) { + if (padding_char == ' ' || width <= length + 1) { + *(--dpos) = '-'; + ++length; + } else { + prepend_sign = 1; + } + ++ulength; + } + if (width > ulength) { + ulength = width; + } + if (ulength == 1) { + return PyUnicode_FromOrdinal(*dpos); + } + return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char); +} + +/* CIntToPyUnicode */ +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_From_Py_ssize_t(Py_ssize_t value, Py_ssize_t width, char padding_char, char format_char) { + char digits[sizeof(Py_ssize_t)*3+2]; + char *dpos, *end = digits + sizeof(Py_ssize_t)*3+2; + const char *hex_digits = DIGITS_HEX; + Py_ssize_t length, ulength; + int prepend_sign, last_one_off; + Py_ssize_t remaining; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const Py_ssize_t neg_one = (Py_ssize_t) -1, const_zero = (Py_ssize_t) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (format_char == 'X') { + hex_digits += 16; + format_char = 'x'; + } + remaining = value; + last_one_off = 0; + dpos = end; + do { + int digit_pos; + switch (format_char) { + case 'o': + digit_pos = abs((int)(remaining % (8*8))); + remaining = (Py_ssize_t) (remaining / (8*8)); + dpos -= 2; + memcpy(dpos, DIGIT_PAIRS_8 + digit_pos * 2, 2); + last_one_off = (digit_pos < 8); + break; + case 'd': + digit_pos = abs((int)(remaining % (10*10))); + remaining = (Py_ssize_t) (remaining / (10*10)); + dpos -= 2; + memcpy(dpos, DIGIT_PAIRS_10 + digit_pos * 2, 2); + last_one_off = (digit_pos < 10); + break; + case 'x': + *(--dpos) = hex_digits[abs((int)(remaining % 16))]; + remaining = (Py_ssize_t) (remaining / 16); + break; + default: + assert(0); + break; + } + } while (unlikely(remaining != 0)); + assert(!last_one_off || *dpos == '0'); + dpos += last_one_off; + length = end - dpos; + ulength = length; + prepend_sign = 0; + if (!is_unsigned && value <= neg_one) { + if (padding_char == ' ' || width <= length + 1) { + *(--dpos) = '-'; + ++length; + } else { + prepend_sign = 1; + } + ++ulength; + } + if (width > ulength) { + ulength = width; + } + if (ulength == 1) { + return PyUnicode_FromOrdinal(*dpos); + } + return __Pyx_PyUnicode_BuildFromAscii(ulength, dpos, (int) length, prepend_sign, padding_char); +} + +/* JoinPyUnicode */ +static PyObject* __Pyx_PyUnicode_Join(PyObject* value_tuple, Py_ssize_t value_count, Py_ssize_t result_ulength, + Py_UCS4 max_char) { +#if CYTHON_USE_UNICODE_INTERNALS && CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + PyObject *result_uval; + int result_ukind, kind_shift; + Py_ssize_t i, char_pos; + void *result_udata; + CYTHON_MAYBE_UNUSED_VAR(max_char); +#if CYTHON_PEP393_ENABLED + result_uval = PyUnicode_New(result_ulength, max_char); + if (unlikely(!result_uval)) return NULL; + result_ukind = (max_char <= 255) ? PyUnicode_1BYTE_KIND : (max_char <= 65535) ? PyUnicode_2BYTE_KIND : PyUnicode_4BYTE_KIND; + kind_shift = (result_ukind == PyUnicode_4BYTE_KIND) ? 
2 : result_ukind - 1; + result_udata = PyUnicode_DATA(result_uval); +#else + result_uval = PyUnicode_FromUnicode(NULL, result_ulength); + if (unlikely(!result_uval)) return NULL; + result_ukind = sizeof(Py_UNICODE); + kind_shift = (result_ukind == 4) ? 2 : result_ukind - 1; + result_udata = PyUnicode_AS_UNICODE(result_uval); +#endif + assert(kind_shift == 2 || kind_shift == 1 || kind_shift == 0); + char_pos = 0; + for (i=0; i < value_count; i++) { + int ukind; + Py_ssize_t ulength; + void *udata; + PyObject *uval = PyTuple_GET_ITEM(value_tuple, i); + if (unlikely(__Pyx_PyUnicode_READY(uval))) + goto bad; + ulength = __Pyx_PyUnicode_GET_LENGTH(uval); + if (unlikely(!ulength)) + continue; + if (unlikely((PY_SSIZE_T_MAX >> kind_shift) - ulength < char_pos)) + goto overflow; + ukind = __Pyx_PyUnicode_KIND(uval); + udata = __Pyx_PyUnicode_DATA(uval); + if (!CYTHON_PEP393_ENABLED || ukind == result_ukind) { + memcpy((char *)result_udata + (char_pos << kind_shift), udata, (size_t) (ulength << kind_shift)); + } else { + #if PY_VERSION_HEX >= 0x030D0000 + if (unlikely(PyUnicode_CopyCharacters(result_uval, char_pos, uval, 0, ulength) < 0)) goto bad; + #elif CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030300F0 || defined(_PyUnicode_FastCopyCharacters) + _PyUnicode_FastCopyCharacters(result_uval, char_pos, uval, 0, ulength); + #else + Py_ssize_t j; + for (j=0; j < ulength; j++) { + Py_UCS4 uchar = __Pyx_PyUnicode_READ(ukind, udata, j); + __Pyx_PyUnicode_WRITE(result_ukind, result_udata, char_pos+j, uchar); + } + #endif + } + char_pos += ulength; + } + return result_uval; +overflow: + PyErr_SetString(PyExc_OverflowError, "join() result is too long for a Python string"); +bad: + Py_DECREF(result_uval); + return NULL; +#else + CYTHON_UNUSED_VAR(max_char); + CYTHON_UNUSED_VAR(result_ulength); + CYTHON_UNUSED_VAR(value_count); + return PyUnicode_Join(__pyx_empty_unicode, value_tuple); +#endif +} + +/* GetAttr */ +static CYTHON_INLINE PyObject *__Pyx_GetAttr(PyObject *o, PyObject *n) { +#if CYTHON_USE_TYPE_SLOTS +#if PY_MAJOR_VERSION >= 3 + if (likely(PyUnicode_Check(n))) +#else + if (likely(PyString_Check(n))) +#endif + return __Pyx_PyObject_GetAttrStr(o, n); +#endif + return PyObject_GetAttr(o, n); +} + +/* GetItemInt */ +static PyObject *__Pyx_GetItemInt_Generic(PyObject *o, PyObject* j) { + PyObject *r; + if (unlikely(!j)) return NULL; + r = PyObject_GetItem(o, j); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_List_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyList_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyList_GET_SIZE(o)))) { + PyObject *r = PyList_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Tuple_Fast(PyObject *o, Py_ssize_t i, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + Py_ssize_t wrapped_i = i; + if (wraparound & unlikely(i < 0)) { + wrapped_i += PyTuple_GET_SIZE(o); + } + if ((!boundscheck) || likely(__Pyx_is_valid_index(wrapped_i, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, wrapped_i); + Py_INCREF(r); + return r; + } + return 
__Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +#else + return PySequence_GetItem(o, i); +#endif +} +static CYTHON_INLINE PyObject *__Pyx_GetItemInt_Fast(PyObject *o, Py_ssize_t i, int is_list, + CYTHON_NCP_UNUSED int wraparound, + CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyList_GET_SIZE(o); + if ((!boundscheck) || (likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o))))) { + PyObject *r = PyList_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } + else if (PyTuple_CheckExact(o)) { + Py_ssize_t n = ((!wraparound) | likely(i >= 0)) ? i : i + PyTuple_GET_SIZE(o); + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyTuple_GET_SIZE(o)))) { + PyObject *r = PyTuple_GET_ITEM(o, n); + Py_INCREF(r); + return r; + } + } else { + PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; + PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; + if (mm && mm->mp_subscript) { + PyObject *r, *key = PyInt_FromSsize_t(i); + if (unlikely(!key)) return NULL; + r = mm->mp_subscript(o, key); + Py_DECREF(key); + return r; + } + if (likely(sm && sm->sq_item)) { + if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { + Py_ssize_t l = sm->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return NULL; + PyErr_Clear(); + } + } + return sm->sq_item(o, i); + } + } +#else + if (is_list || PySequence_Check(o)) { + return PySequence_GetItem(o, i); + } +#endif + return __Pyx_GetItemInt_Generic(o, PyInt_FromSsize_t(i)); +} + +/* PyObjectCallOneArg */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallOneArg(PyObject *func, PyObject *arg) { + PyObject *args[2] = {NULL, arg}; + return __Pyx_PyObject_FastCall(func, args+1, 1 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + +/* ObjectGetItem */ +#if CYTHON_USE_TYPE_SLOTS +static PyObject *__Pyx_PyObject_GetIndex(PyObject *obj, PyObject *index) { + PyObject *runerr = NULL; + Py_ssize_t key_value; + key_value = __Pyx_PyIndex_AsSsize_t(index); + if (likely(key_value != -1 || !(runerr = PyErr_Occurred()))) { + return __Pyx_GetItemInt_Fast(obj, key_value, 0, 1, 1); + } + if (PyErr_GivenExceptionMatches(runerr, PyExc_OverflowError)) { + __Pyx_TypeName index_type_name = __Pyx_PyType_GetName(Py_TYPE(index)); + PyErr_Clear(); + PyErr_Format(PyExc_IndexError, + "cannot fit '" __Pyx_FMT_TYPENAME "' into an index-sized integer", index_type_name); + __Pyx_DECREF_TypeName(index_type_name); + } + return NULL; +} +static PyObject *__Pyx_PyObject_GetItem_Slow(PyObject *obj, PyObject *key) { + __Pyx_TypeName obj_type_name; + if (likely(PyType_Check(obj))) { + PyObject *meth = __Pyx_PyObject_GetAttrStrNoError(obj, __pyx_n_s_class_getitem); + if (meth) { + PyObject *result = __Pyx_PyObject_CallOneArg(meth, key); + Py_DECREF(meth); + return result; + } + } + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'" __Pyx_FMT_TYPENAME "' object is not subscriptable", obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return NULL; +} +static PyObject *__Pyx_PyObject_GetItem(PyObject *obj, PyObject *key) { + PyTypeObject *tp = Py_TYPE(obj); + PyMappingMethods *mm = tp->tp_as_mapping; + PySequenceMethods *sm = tp->tp_as_sequence; + if (likely(mm && mm->mp_subscript)) { + return mm->mp_subscript(obj, key); + } + if (likely(sm && sm->sq_item)) { + return __Pyx_PyObject_GetIndex(obj, key); + } + return 
__Pyx_PyObject_GetItem_Slow(obj, key); +} +#endif + +/* KeywordStringCheck */ +static int __Pyx_CheckKeywordStrings( + PyObject *kw, + const char* function_name, + int kw_allowed) +{ + PyObject* key = 0; + Py_ssize_t pos = 0; +#if CYTHON_COMPILING_IN_PYPY + if (!kw_allowed && PyDict_Next(kw, &pos, &key, 0)) + goto invalid_keyword; + return 1; +#else + if (CYTHON_METH_FASTCALL && likely(PyTuple_Check(kw))) { + Py_ssize_t kwsize; +#if CYTHON_ASSUME_SAFE_MACROS + kwsize = PyTuple_GET_SIZE(kw); +#else + kwsize = PyTuple_Size(kw); + if (kwsize < 0) return 0; +#endif + if (unlikely(kwsize == 0)) + return 1; + if (!kw_allowed) { +#if CYTHON_ASSUME_SAFE_MACROS + key = PyTuple_GET_ITEM(kw, 0); +#else + key = PyTuple_GetItem(kw, pos); + if (!key) return 0; +#endif + goto invalid_keyword; + } +#if PY_VERSION_HEX < 0x03090000 + for (pos = 0; pos < kwsize; pos++) { +#if CYTHON_ASSUME_SAFE_MACROS + key = PyTuple_GET_ITEM(kw, pos); +#else + key = PyTuple_GetItem(kw, pos); + if (!key) return 0; +#endif + if (unlikely(!PyUnicode_Check(key))) + goto invalid_keyword_type; + } +#endif + return 1; + } + while (PyDict_Next(kw, &pos, &key, 0)) { + #if PY_MAJOR_VERSION < 3 + if (unlikely(!PyString_Check(key))) + #endif + if (unlikely(!PyUnicode_Check(key))) + goto invalid_keyword_type; + } + if (!kw_allowed && unlikely(key)) + goto invalid_keyword; + return 1; +invalid_keyword_type: + PyErr_Format(PyExc_TypeError, + "%.200s() keywords must be strings", function_name); + return 0; +#endif +invalid_keyword: + #if PY_MAJOR_VERSION < 3 + PyErr_Format(PyExc_TypeError, + "%.200s() got an unexpected keyword argument '%.200s'", + function_name, PyString_AsString(key)); + #else + PyErr_Format(PyExc_TypeError, + "%s() got an unexpected keyword argument '%U'", + function_name, key); + #endif + return 0; +} + +/* DivInt[Py_ssize_t] */ +static CYTHON_INLINE Py_ssize_t __Pyx_div_Py_ssize_t(Py_ssize_t a, Py_ssize_t b) { + Py_ssize_t q = a / b; + Py_ssize_t r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* GetAttr3 */ +static PyObject *__Pyx_GetAttr3Default(PyObject *d) { + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + if (unlikely(!__Pyx_PyErr_ExceptionMatches(PyExc_AttributeError))) + return NULL; + __Pyx_PyErr_Clear(); + Py_INCREF(d); + return d; +} +static CYTHON_INLINE PyObject *__Pyx_GetAttr3(PyObject *o, PyObject *n, PyObject *d) { + PyObject *r; +#if CYTHON_USE_TYPE_SLOTS + if (likely(PyString_Check(n))) { + r = __Pyx_PyObject_GetAttrStrNoError(o, n); + if (unlikely(!r) && likely(!PyErr_Occurred())) { + r = __Pyx_NewRef(d); + } + return r; + } +#endif + r = PyObject_GetAttr(o, n); + return (likely(r)) ? r : __Pyx_GetAttr3Default(d); +} + +/* PyDictVersioning */ +#if CYTHON_USE_DICT_VERSIONS && CYTHON_USE_TYPE_SLOTS +static CYTHON_INLINE PY_UINT64_T __Pyx_get_tp_dict_version(PyObject *obj) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + return likely(dict) ? __PYX_GET_DICT_VERSION(dict) : 0; +} +static CYTHON_INLINE PY_UINT64_T __Pyx_get_object_dict_version(PyObject *obj) { + PyObject **dictptr = NULL; + Py_ssize_t offset = Py_TYPE(obj)->tp_dictoffset; + if (offset) { +#if CYTHON_COMPILING_IN_CPYTHON + dictptr = (likely(offset > 0)) ? (PyObject **) ((char *)obj + offset) : _PyObject_GetDictPtr(obj); +#else + dictptr = _PyObject_GetDictPtr(obj); +#endif + } + return (dictptr && *dictptr) ? 
__PYX_GET_DICT_VERSION(*dictptr) : 0; +} +static CYTHON_INLINE int __Pyx_object_dict_version_matches(PyObject* obj, PY_UINT64_T tp_dict_version, PY_UINT64_T obj_dict_version) { + PyObject *dict = Py_TYPE(obj)->tp_dict; + if (unlikely(!dict) || unlikely(tp_dict_version != __PYX_GET_DICT_VERSION(dict))) + return 0; + return obj_dict_version == __Pyx_get_object_dict_version(obj); +} +#endif + +/* GetModuleGlobalName */ +#if CYTHON_USE_DICT_VERSIONS +static PyObject *__Pyx__GetModuleGlobalName(PyObject *name, PY_UINT64_T *dict_version, PyObject **dict_cached_value) +#else +static CYTHON_INLINE PyObject *__Pyx__GetModuleGlobalName(PyObject *name) +#endif +{ + PyObject *result; +#if !CYTHON_AVOID_BORROWED_REFS +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030500A1 + result = _PyDict_GetItem_KnownHash(__pyx_d, name, ((PyASCIIObject *) name)->hash); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } else if (unlikely(PyErr_Occurred())) { + return NULL; + } +#elif CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(!__pyx_m)) { + return NULL; + } + result = PyObject_GetAttr(__pyx_m, name); + if (likely(result)) { + return result; + } +#else + result = PyDict_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } +#endif +#else + result = PyObject_GetItem(__pyx_d, name); + __PYX_UPDATE_DICT_CACHE(__pyx_d, result, *dict_cached_value, *dict_version) + if (likely(result)) { + return __Pyx_NewRef(result); + } + PyErr_Clear(); +#endif + return __Pyx_GetBuiltinName(name); +} + +/* RaiseTooManyValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseTooManyValuesError(Py_ssize_t expected) { + PyErr_Format(PyExc_ValueError, + "too many values to unpack (expected %" CYTHON_FORMAT_SSIZE_T "d)", expected); +} + +/* RaiseNeedMoreValuesToUnpack */ +static CYTHON_INLINE void __Pyx_RaiseNeedMoreValuesError(Py_ssize_t index) { + PyErr_Format(PyExc_ValueError, + "need more than %" CYTHON_FORMAT_SSIZE_T "d value%.1s to unpack", + index, (index == 1) ? 
"" : "s"); +} + +/* RaiseNoneIterError */ +static CYTHON_INLINE void __Pyx_RaiseNoneNotIterableError(void) { + PyErr_SetString(PyExc_TypeError, "'NoneType' object is not iterable"); +} + +/* ExtTypeTest */ +static CYTHON_INLINE int __Pyx_TypeTest(PyObject *obj, PyTypeObject *type) { + __Pyx_TypeName obj_type_name; + __Pyx_TypeName type_name; + if (unlikely(!type)) { + PyErr_SetString(PyExc_SystemError, "Missing type object"); + return 0; + } + if (likely(__Pyx_TypeCheck(obj, type))) + return 1; + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + type_name = __Pyx_PyType_GetName(type); + PyErr_Format(PyExc_TypeError, + "Cannot convert " __Pyx_FMT_TYPENAME " to " __Pyx_FMT_TYPENAME, + obj_type_name, type_name); + __Pyx_DECREF_TypeName(obj_type_name); + __Pyx_DECREF_TypeName(type_name); + return 0; +} + +/* GetTopmostException */ +#if CYTHON_USE_EXC_INFO_STACK && CYTHON_FAST_THREAD_STATE +static _PyErr_StackItem * +__Pyx_PyErr_GetTopmostException(PyThreadState *tstate) +{ + _PyErr_StackItem *exc_info = tstate->exc_info; + while ((exc_info->exc_value == NULL || exc_info->exc_value == Py_None) && + exc_info->previous_item != NULL) + { + exc_info = exc_info->previous_item; + } + return exc_info; +} +#endif + +/* SaveResetException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSave(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + PyObject *exc_value = exc_info->exc_value; + if (exc_value == NULL || exc_value == Py_None) { + *value = NULL; + *type = NULL; + *tb = NULL; + } else { + *value = exc_value; + Py_INCREF(*value); + *type = (PyObject*) Py_TYPE(exc_value); + Py_INCREF(*type); + *tb = PyException_GetTraceback(exc_value); + } + #elif CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = __Pyx_PyErr_GetTopmostException(tstate); + *type = exc_info->exc_type; + *value = exc_info->exc_value; + *tb = exc_info->exc_traceback; + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); + #else + *type = tstate->exc_type; + *value = tstate->exc_value; + *tb = tstate->exc_traceback; + Py_XINCREF(*type); + Py_XINCREF(*value); + Py_XINCREF(*tb); + #endif +} +static CYTHON_INLINE void __Pyx__ExceptionReset(PyThreadState *tstate, PyObject *type, PyObject *value, PyObject *tb) { + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = tstate->exc_info; + PyObject *tmp_value = exc_info->exc_value; + exc_info->exc_value = value; + Py_XDECREF(tmp_value); + Py_XDECREF(type); + Py_XDECREF(tb); + #else + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = type; + exc_info->exc_value = value; + exc_info->exc_traceback = tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = type; + tstate->exc_value = value; + tstate->exc_traceback = tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); + #endif +} +#endif + +/* GetException */ +#if CYTHON_FAST_THREAD_STATE +static int __Pyx__GetException(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) +#else +static int __Pyx_GetException(PyObject **type, PyObject **value, PyObject **tb) +#endif +{ + PyObject *local_type 
= NULL, *local_value, *local_tb = NULL; +#if CYTHON_FAST_THREAD_STATE + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if PY_VERSION_HEX >= 0x030C00A6 + local_value = tstate->current_exception; + tstate->current_exception = 0; + if (likely(local_value)) { + local_type = (PyObject*) Py_TYPE(local_value); + Py_INCREF(local_type); + local_tb = PyException_GetTraceback(local_value); + } + #else + local_type = tstate->curexc_type; + local_value = tstate->curexc_value; + local_tb = tstate->curexc_traceback; + tstate->curexc_type = 0; + tstate->curexc_value = 0; + tstate->curexc_traceback = 0; + #endif +#else + PyErr_Fetch(&local_type, &local_value, &local_tb); +#endif + PyErr_NormalizeException(&local_type, &local_value, &local_tb); +#if CYTHON_FAST_THREAD_STATE && PY_VERSION_HEX >= 0x030C00A6 + if (unlikely(tstate->current_exception)) +#elif CYTHON_FAST_THREAD_STATE + if (unlikely(tstate->curexc_type)) +#else + if (unlikely(PyErr_Occurred())) +#endif + goto bad; + #if PY_MAJOR_VERSION >= 3 + if (local_tb) { + if (unlikely(PyException_SetTraceback(local_value, local_tb) < 0)) + goto bad; + } + #endif + Py_XINCREF(local_tb); + Py_XINCREF(local_type); + Py_XINCREF(local_value); + *type = local_type; + *value = local_value; + *tb = local_tb; +#if CYTHON_FAST_THREAD_STATE + #if CYTHON_USE_EXC_INFO_STACK + { + _PyErr_StackItem *exc_info = tstate->exc_info; + #if PY_VERSION_HEX >= 0x030B00a4 + tmp_value = exc_info->exc_value; + exc_info->exc_value = local_value; + tmp_type = NULL; + tmp_tb = NULL; + Py_XDECREF(local_type); + Py_XDECREF(local_tb); + #else + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = local_type; + exc_info->exc_value = local_value; + exc_info->exc_traceback = local_tb; + #endif + } + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = local_type; + tstate->exc_value = local_value; + tstate->exc_traceback = local_tb; + #endif + Py_XDECREF(tmp_type); + Py_XDECREF(tmp_value); + Py_XDECREF(tmp_tb); +#else + PyErr_SetExcInfo(local_type, local_value, local_tb); +#endif + return 0; +bad: + *type = 0; + *value = 0; + *tb = 0; + Py_XDECREF(local_type); + Py_XDECREF(local_value); + Py_XDECREF(local_tb); + return -1; +} + +/* SwapException */ +#if CYTHON_FAST_THREAD_STATE +static CYTHON_INLINE void __Pyx__ExceptionSwap(PyThreadState *tstate, PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + #if CYTHON_USE_EXC_INFO_STACK && PY_VERSION_HEX >= 0x030B00a4 + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_value = exc_info->exc_value; + exc_info->exc_value = *value; + if (tmp_value == NULL || tmp_value == Py_None) { + Py_XDECREF(tmp_value); + tmp_value = NULL; + tmp_type = NULL; + tmp_tb = NULL; + } else { + tmp_type = (PyObject*) Py_TYPE(tmp_value); + Py_INCREF(tmp_type); + #if CYTHON_COMPILING_IN_CPYTHON + tmp_tb = ((PyBaseExceptionObject*) tmp_value)->traceback; + Py_XINCREF(tmp_tb); + #else + tmp_tb = PyException_GetTraceback(tmp_value); + #endif + } + #elif CYTHON_USE_EXC_INFO_STACK + _PyErr_StackItem *exc_info = tstate->exc_info; + tmp_type = exc_info->exc_type; + tmp_value = exc_info->exc_value; + tmp_tb = exc_info->exc_traceback; + exc_info->exc_type = *type; + exc_info->exc_value = *value; + exc_info->exc_traceback = *tb; + #else + tmp_type = tstate->exc_type; + tmp_value = tstate->exc_value; + tmp_tb = tstate->exc_traceback; + tstate->exc_type = *type; + tstate->exc_value = *value; + 
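/* The store below completes the swap: the caller's (*type, *value, *tb) triple becomes the thread state's "currently handled" exception, while the previous triple, captured in tmp_type/tmp_value/tmp_tb above, is handed back through the same out-pointers. In effect this is old = exc_info(); set_exc_info(saved); saved = old, which Cython uses, e.g., to save and restore exception state across generator suspension points. */ +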
tstate->exc_traceback = *tb; + #endif + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#else +static CYTHON_INLINE void __Pyx_ExceptionSwap(PyObject **type, PyObject **value, PyObject **tb) { + PyObject *tmp_type, *tmp_value, *tmp_tb; + PyErr_GetExcInfo(&tmp_type, &tmp_value, &tmp_tb); + PyErr_SetExcInfo(*type, *value, *tb); + *type = tmp_type; + *value = tmp_value; + *tb = tmp_tb; +} +#endif + +/* Import */ +static PyObject *__Pyx_Import(PyObject *name, PyObject *from_list, int level) { + PyObject *module = 0; + PyObject *empty_dict = 0; + PyObject *empty_list = 0; + #if PY_MAJOR_VERSION < 3 + PyObject *py_import; + py_import = __Pyx_PyObject_GetAttrStr(__pyx_b, __pyx_n_s_import); + if (unlikely(!py_import)) + goto bad; + if (!from_list) { + empty_list = PyList_New(0); + if (unlikely(!empty_list)) + goto bad; + from_list = empty_list; + } + #endif + empty_dict = PyDict_New(); + if (unlikely(!empty_dict)) + goto bad; + { + #if PY_MAJOR_VERSION >= 3 + if (level == -1) { + if ((1) && (strchr(__Pyx_MODULE_NAME, '.'))) { + module = PyImport_ImportModuleLevelObject( + name, __pyx_d, empty_dict, from_list, 1); + if (unlikely(!module)) { + if (unlikely(!PyErr_ExceptionMatches(PyExc_ImportError))) + goto bad; + PyErr_Clear(); + } + } + level = 0; + } + #endif + if (!module) { + #if PY_MAJOR_VERSION < 3 + PyObject *py_level = PyInt_FromLong(level); + if (unlikely(!py_level)) + goto bad; + module = PyObject_CallFunctionObjArgs(py_import, + name, __pyx_d, empty_dict, from_list, py_level, (PyObject *)NULL); + Py_DECREF(py_level); + #else + module = PyImport_ImportModuleLevelObject( + name, __pyx_d, empty_dict, from_list, level); + #endif + } + } +bad: + Py_XDECREF(empty_dict); + Py_XDECREF(empty_list); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_import); + #endif + return module; +} + +/* ImportDottedModule */ +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx__ImportDottedModule_Error(PyObject *name, PyObject *parts_tuple, Py_ssize_t count) { + PyObject *partial_name = NULL, *slice = NULL, *sep = NULL; + if (unlikely(PyErr_Occurred())) { + PyErr_Clear(); + } + if (likely(PyTuple_GET_SIZE(parts_tuple) == count)) { + partial_name = name; + } else { + slice = PySequence_GetSlice(parts_tuple, 0, count); + if (unlikely(!slice)) + goto bad; + sep = PyUnicode_FromStringAndSize(".", 1); + if (unlikely(!sep)) + goto bad; + partial_name = PyUnicode_Join(sep, slice); + } + PyErr_Format( +#if PY_MAJOR_VERSION < 3 + PyExc_ImportError, + "No module named '%s'", PyString_AS_STRING(partial_name)); +#else +#if PY_VERSION_HEX >= 0x030600B1 + PyExc_ModuleNotFoundError, +#else + PyExc_ImportError, +#endif + "No module named '%U'", partial_name); +#endif +bad: + Py_XDECREF(sep); + Py_XDECREF(slice); + Py_XDECREF(partial_name); + return NULL; +} +#endif +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx__ImportDottedModule_Lookup(PyObject *name) { + PyObject *imported_module; +#if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) + PyObject *modules = PyImport_GetModuleDict(); + if (unlikely(!modules)) + return NULL; + imported_module = __Pyx_PyDict_GetItemStr(modules, name); + Py_XINCREF(imported_module); +#else + imported_module = PyImport_GetModule(name); +#endif + return imported_module; +} +#endif +#if PY_MAJOR_VERSION >= 3 +static PyObject *__Pyx_ImportDottedModule_WalkParts(PyObject *module, PyObject *name, PyObject *parts_tuple) { + Py_ssize_t i, nparts; + nparts = PyTuple_GET_SIZE(parts_tuple); + for (i=1; i < nparts && module; i++) { + PyObject *part, 
*submodule; +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + part = PyTuple_GET_ITEM(parts_tuple, i); +#else + part = PySequence_ITEM(parts_tuple, i); +#endif + submodule = __Pyx_PyObject_GetAttrStrNoError(module, part); +#if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(part); +#endif + Py_DECREF(module); + module = submodule; + } + if (unlikely(!module)) { + return __Pyx__ImportDottedModule_Error(name, parts_tuple, i); + } + return module; +} +#endif +static PyObject *__Pyx__ImportDottedModule(PyObject *name, PyObject *parts_tuple) { +#if PY_MAJOR_VERSION < 3 + PyObject *module, *from_list, *star = __pyx_n_s__3; + CYTHON_UNUSED_VAR(parts_tuple); + from_list = PyList_New(1); + if (unlikely(!from_list)) + return NULL; + Py_INCREF(star); + PyList_SET_ITEM(from_list, 0, star); + module = __Pyx_Import(name, from_list, 0); + Py_DECREF(from_list); + return module; +#else + PyObject *imported_module; + PyObject *module = __Pyx_Import(name, NULL, 0); + if (!parts_tuple || unlikely(!module)) + return module; + imported_module = __Pyx__ImportDottedModule_Lookup(name); + if (likely(imported_module)) { + Py_DECREF(module); + return imported_module; + } + PyErr_Clear(); + return __Pyx_ImportDottedModule_WalkParts(module, name, parts_tuple); +#endif +} +static PyObject *__Pyx_ImportDottedModule(PyObject *name, PyObject *parts_tuple) { +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX >= 0x030400B1 + PyObject *module = __Pyx__ImportDottedModule_Lookup(name); + if (likely(module)) { + PyObject *spec = __Pyx_PyObject_GetAttrStrNoError(module, __pyx_n_s_spec); + if (likely(spec)) { + PyObject *unsafe = __Pyx_PyObject_GetAttrStrNoError(spec, __pyx_n_s_initializing); + if (likely(!unsafe || !__Pyx_PyObject_IsTrue(unsafe))) { + Py_DECREF(spec); + spec = NULL; + } + Py_XDECREF(unsafe); + } + if (likely(!spec)) { + PyErr_Clear(); + return module; + } + Py_DECREF(spec); + Py_DECREF(module); + } else if (PyErr_Occurred()) { + PyErr_Clear(); + } +#endif + return __Pyx__ImportDottedModule(name, parts_tuple); +} + +/* ssize_strlen */ +static CYTHON_INLINE Py_ssize_t __Pyx_ssize_strlen(const char *s) { + size_t len = strlen(s); + if (unlikely(len > PY_SSIZE_T_MAX)) { + PyErr_SetString(PyExc_OverflowError, "byte string is too long"); + return -1; + } + return (Py_ssize_t) len; +} + +/* FastTypeChecks */ +#if CYTHON_COMPILING_IN_CPYTHON +static int __Pyx_InBases(PyTypeObject *a, PyTypeObject *b) { + while (a) { + a = __Pyx_PyType_GetSlot(a, tp_base, PyTypeObject*); + if (a == b) + return 1; + } + return b == &PyBaseObject_Type; +} +static CYTHON_INLINE int __Pyx_IsSubtype(PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (a == b) return 1; + mro = a->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + if (PyTuple_GET_ITEM(mro, i) == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(a, b); +} +static CYTHON_INLINE int __Pyx_IsAnySubtype2(PyTypeObject *cls, PyTypeObject *a, PyTypeObject *b) { + PyObject *mro; + if (cls == a || cls == b) return 1; + mro = cls->tp_mro; + if (likely(mro)) { + Py_ssize_t i, n; + n = PyTuple_GET_SIZE(mro); + for (i = 0; i < n; i++) { + PyObject *base = PyTuple_GET_ITEM(mro, i); + if (base == (PyObject *)a || base == (PyObject *)b) + return 1; + } + return 0; + } + return __Pyx_InBases(cls, a) || __Pyx_InBases(cls, b); +} +#if PY_MAJOR_VERSION == 2 +static int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject* exc_type2) { + PyObject 
*exception, *value, *tb; + int res; + __Pyx_PyThreadState_declare + __Pyx_PyThreadState_assign + __Pyx_ErrFetch(&exception, &value, &tb); + res = exc_type1 ? PyObject_IsSubclass(err, exc_type1) : 0; + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + if (!res) { + res = PyObject_IsSubclass(err, exc_type2); + if (unlikely(res == -1)) { + PyErr_WriteUnraisable(err); + res = 0; + } + } + __Pyx_ErrRestore(exception, value, tb); + return res; +} +#else +static CYTHON_INLINE int __Pyx_inner_PyErr_GivenExceptionMatches2(PyObject *err, PyObject* exc_type1, PyObject *exc_type2) { + if (exc_type1) { + return __Pyx_IsAnySubtype2((PyTypeObject*)err, (PyTypeObject*)exc_type1, (PyTypeObject*)exc_type2); + } else { + return __Pyx_IsSubtype((PyTypeObject*)err, (PyTypeObject*)exc_type2); + } +} +#endif +static int __Pyx_PyErr_GivenExceptionMatchesTuple(PyObject *exc_type, PyObject *tuple) { + Py_ssize_t i, n; + assert(PyExceptionClass_Check(exc_type)); + n = PyTuple_GET_SIZE(tuple); +#if PY_MAJOR_VERSION >= 3 + for (i=0; i<n; i++) { + if (exc_type == PyTuple_GET_ITEM(tuple, i)) return 1; + } +#endif + for (i=0; i<n; i++) { + PyObject *t = PyTuple_GET_ITEM(tuple, i); + #if PY_MAJOR_VERSION < 3 + if (likely(exc_type == t)) return 1; + #endif + if (likely(PyExceptionClass_Check(t))) { + if (__Pyx_inner_PyErr_GivenExceptionMatches2(exc_type, NULL, t)) return 1; + } else { + } + } + return 0; +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches(PyObject *err, PyObject* exc_type) { + if (likely(err == exc_type)) return 1; + if (likely(PyExceptionClass_Check(err))) { + if (likely(PyExceptionClass_Check(exc_type))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, NULL, exc_type); + } else if (likely(PyTuple_Check(exc_type))) { + return __Pyx_PyErr_GivenExceptionMatchesTuple(err, exc_type); + } else { + } + } + return PyErr_GivenExceptionMatches(err, exc_type); +} +static CYTHON_INLINE int __Pyx_PyErr_GivenExceptionMatches2(PyObject *err, PyObject *exc_type1, PyObject *exc_type2) { + if (likely(err == exc_type1 || err == exc_type2)) return 1; + if (likely(PyExceptionClass_Check(err))) { + return __Pyx_inner_PyErr_GivenExceptionMatches2(err, exc_type1, exc_type2); + } + return (PyErr_GivenExceptionMatches(err, exc_type1) || PyErr_GivenExceptionMatches(err, exc_type2)); +} +#endif + +/* PySequenceMultiply */ +static PyObject* __Pyx_PySequence_Multiply_Generic(PyObject *seq, Py_ssize_t mul) { + PyObject *result, *pymul = PyInt_FromSsize_t(mul); + if (unlikely(!pymul)) + return NULL; + result = PyNumber_Multiply(seq, pymul); + Py_DECREF(pymul); + return result; +} +static CYTHON_INLINE PyObject* __Pyx_PySequence_Multiply(PyObject *seq, Py_ssize_t mul) { +#if CYTHON_USE_TYPE_SLOTS + PyTypeObject *type = Py_TYPE(seq); + if (likely(type->tp_as_sequence && type->tp_as_sequence->sq_repeat)) { + return type->tp_as_sequence->sq_repeat(seq, mul); + } else +#endif + { + return __Pyx_PySequence_Multiply_Generic(seq, mul); + } +} + +/* SetItemInt */ +static int __Pyx_SetItemInt_Generic(PyObject *o, PyObject *j, PyObject *v) { + int r; + if (unlikely(!j)) return -1; + r = PyObject_SetItem(o, j, v); + Py_DECREF(j); + return r; +} +static CYTHON_INLINE int __Pyx_SetItemInt_Fast(PyObject *o, Py_ssize_t i, PyObject *v, int is_list, + CYTHON_NCP_UNUSED int wraparound, CYTHON_NCP_UNUSED int boundscheck) { +#if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS && CYTHON_USE_TYPE_SLOTS + if (is_list || PyList_CheckExact(o)) { + Py_ssize_t n = (!wraparound) ? i : ((likely(i >= 0)) ?
i : i + PyList_GET_SIZE(o)); + if ((!boundscheck) || likely(__Pyx_is_valid_index(n, PyList_GET_SIZE(o)))) { + PyObject* old = PyList_GET_ITEM(o, n); + Py_INCREF(v); + PyList_SET_ITEM(o, n, v); + Py_DECREF(old); + return 1; + } + } else { + PyMappingMethods *mm = Py_TYPE(o)->tp_as_mapping; + PySequenceMethods *sm = Py_TYPE(o)->tp_as_sequence; + if (mm && mm->mp_ass_subscript) { + int r; + PyObject *key = PyInt_FromSsize_t(i); + if (unlikely(!key)) return -1; + r = mm->mp_ass_subscript(o, key, v); + Py_DECREF(key); + return r; + } + if (likely(sm && sm->sq_ass_item)) { + if (wraparound && unlikely(i < 0) && likely(sm->sq_length)) { + Py_ssize_t l = sm->sq_length(o); + if (likely(l >= 0)) { + i += l; + } else { + if (!PyErr_ExceptionMatches(PyExc_OverflowError)) + return -1; + PyErr_Clear(); + } + } + return sm->sq_ass_item(o, i, v); + } + } +#else +#if CYTHON_COMPILING_IN_PYPY + if (is_list || (PySequence_Check(o) && !PyDict_Check(o))) +#else + if (is_list || PySequence_Check(o)) +#endif + { + return PySequence_SetItem(o, i, v); + } +#endif + return __Pyx_SetItemInt_Generic(o, PyInt_FromSsize_t(i), v); +} + +/* RaiseUnboundLocalError */ +static CYTHON_INLINE void __Pyx_RaiseUnboundLocalError(const char *varname) { + PyErr_Format(PyExc_UnboundLocalError, "local variable '%s' referenced before assignment", varname); +} + +/* DivInt[long] */ +static CYTHON_INLINE long __Pyx_div_long(long a, long b) { + long q = a / b; + long r = a - q*b; + q -= ((r != 0) & ((r ^ b) < 0)); + return q; +} + +/* ImportFrom */ +static PyObject* __Pyx_ImportFrom(PyObject* module, PyObject* name) { + PyObject* value = __Pyx_PyObject_GetAttrStr(module, name); + if (unlikely(!value) && PyErr_ExceptionMatches(PyExc_AttributeError)) { + const char* module_name_str = 0; + PyObject* module_name = 0; + PyObject* module_dot = 0; + PyObject* full_name = 0; + PyErr_Clear(); + module_name_str = PyModule_GetName(module); + if (unlikely(!module_name_str)) { goto modbad; } + module_name = PyUnicode_FromString(module_name_str); + if (unlikely(!module_name)) { goto modbad; } + module_dot = PyUnicode_Concat(module_name, __pyx_kp_u__2); + if (unlikely(!module_dot)) { goto modbad; } + full_name = PyUnicode_Concat(module_dot, name); + if (unlikely(!full_name)) { goto modbad; } + #if PY_VERSION_HEX < 0x030700A1 || (CYTHON_COMPILING_IN_PYPY && PYPY_VERSION_NUM < 0x07030400) + { + PyObject *modules = PyImport_GetModuleDict(); + if (unlikely(!modules)) + goto modbad; + value = PyObject_GetItem(modules, full_name); + } + #else + value = PyImport_GetModule(full_name); + #endif + modbad: + Py_XDECREF(full_name); + Py_XDECREF(module_dot); + Py_XDECREF(module_name); + } + if (unlikely(!value)) { + PyErr_Format(PyExc_ImportError, + #if PY_MAJOR_VERSION < 3 + "cannot import name %.230s", PyString_AS_STRING(name)); + #else + "cannot import name %S", name); + #endif + } + return value; +} + +/* HasAttr */ +static CYTHON_INLINE int __Pyx_HasAttr(PyObject *o, PyObject *n) { + PyObject *r; + if (unlikely(!__Pyx_PyBaseString_Check(n))) { + PyErr_SetString(PyExc_TypeError, + "hasattr(): attribute name must be string"); + return -1; + } + r = __Pyx_GetAttr(o, n); + if (!r) { + PyErr_Clear(); + return 0; + } else { + Py_DECREF(r); + return 1; + } +} + +/* PyFloatBinop */ +#if !CYTHON_COMPILING_IN_PYPY +static PyObject* __Pyx_PyFloat_TrueDivideCObj(PyObject *op1, PyObject *op2, double floatval, int inplace, int zerodivision_check) { + const double a = floatval; + double b, result; + CYTHON_UNUSED_VAR(inplace); + CYTHON_UNUSED_VAR(zerodivision_check); + if 
(likely(PyFloat_CheckExact(op2))) { +#if CYTHON_COMPILING_IN_LIMITED_API + b = __pyx_PyFloat_AsDouble(op2); +#else + b = PyFloat_AS_DOUBLE(op2); +#endif + if (unlikely(zerodivision_check && ((b) == 0.0))) { PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero"); return NULL;} + } else + #if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(op2))) { + b = (double) PyInt_AS_LONG(op2); + if (unlikely(zerodivision_check && ((b) == 0.0))) { PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero"); return NULL;} + } else + #endif + if (likely(PyLong_CheckExact(op2))) { + #if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsZero(op2)) { + b = 0.0; + if (unlikely(zerodivision_check && ((b) == 0.0))) { PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero"); return NULL;} + } else if (__Pyx_PyLong_IsCompact(op2)) { + b = (double) __Pyx_PyLong_CompactValue(op2); + } else { + const digit* digits = __Pyx_PyLong_Digits(op2); + const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(op2); + switch (size) { + case -2: + case 2: + if (8 * sizeof(unsigned long) > 2 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (1 * PyLong_SHIFT < 53))) { + b = (double) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { + if (size == -2) + b = -b; + break; + } + } + CYTHON_FALLTHROUGH; + case -3: + case 3: + if (8 * sizeof(unsigned long) > 3 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (2 * PyLong_SHIFT < 53))) { + b = (double) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { + if (size == -3) + b = -b; + break; + } + } + CYTHON_FALLTHROUGH; + case -4: + case 4: + if (8 * sizeof(unsigned long) > 4 * PyLong_SHIFT && ((8 * sizeof(unsigned long) < 53) || (3 * PyLong_SHIFT < 53))) { + b = (double) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0])); + if ((8 * sizeof(unsigned long) < 53) || (4 * PyLong_SHIFT < 53) || (b < (double) ((PY_LONG_LONG)1 << 53))) { + if (size == -4) + b = -b; + break; + } + } + CYTHON_FALLTHROUGH; + default: + #endif + b = PyLong_AsDouble(op2); + if (unlikely(b == -1.0 && PyErr_Occurred())) return NULL; + #if !CYTHON_USE_PYLONG_INTERNALS + if (unlikely(zerodivision_check && ((b) == 0.0))) { PyErr_SetString(PyExc_ZeroDivisionError, "float division by zero"); return NULL;} + #endif + #if CYTHON_USE_PYLONG_INTERNALS + } + } + #endif + } else { + return (inplace ? 
PyNumber_InPlaceTrueDivide : PyNumber_TrueDivide)(op1, op2); + } + PyFPE_START_PROTECT("divide", return NULL) + result = a / b; + PyFPE_END_PROTECT(result) + return PyFloat_FromDouble(result); +} +#endif + +/* UnpackUnboundCMethod */ +static PyObject *__Pyx_SelflessCall(PyObject *method, PyObject *args, PyObject *kwargs) { + PyObject *selfless_args = PyTuple_GetSlice(args, 1, PyTuple_Size(args)); + if (unlikely(!selfless_args)) return NULL; + PyObject *result = PyObject_Call(method, selfless_args, kwargs); + Py_DECREF(selfless_args); + return result; +} +static PyMethodDef __Pyx_UnboundCMethod_Def = { + "CythonUnboundCMethod", + __PYX_REINTERPRET_FUNCION(PyCFunction, __Pyx_SelflessCall), + METH_VARARGS | METH_KEYWORDS, + NULL +}; +static int __Pyx_TryUnpackUnboundCMethod(__Pyx_CachedCFunction* target) { + PyObject *method; + method = __Pyx_PyObject_GetAttrStr(target->type, *target->method_name); + if (unlikely(!method)) + return -1; + target->method = method; +#if CYTHON_COMPILING_IN_CPYTHON + #if PY_MAJOR_VERSION >= 3 + if (likely(__Pyx_TypeCheck(method, &PyMethodDescr_Type))) + #else + if (likely(!PyCFunction_Check(method))) + #endif + { + PyMethodDescrObject *descr = (PyMethodDescrObject*) method; + target->func = descr->d_method->ml_meth; + target->flag = descr->d_method->ml_flags & ~(METH_CLASS | METH_STATIC | METH_COEXIST | METH_STACKLESS); + } else +#endif +#if defined(CYTHON_COMPILING_IN_PYPY) +#elif PY_VERSION_HEX >= 0x03090000 + if (PyCFunction_CheckExact(method)) +#else + if (PyCFunction_Check(method)) +#endif + { + PyObject *self; + int self_found; +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + self = PyObject_GetAttrString(method, "__self__"); + if (!self) { + PyErr_Clear(); + } +#else + self = PyCFunction_GET_SELF(method); +#endif + self_found = (self && self != Py_None); +#if CYTHON_COMPILING_IN_LIMITED_API || CYTHON_COMPILING_IN_PYPY + Py_XDECREF(self); +#endif + if (self_found) { + PyObject *unbound_method = PyCFunction_New(&__Pyx_UnboundCMethod_Def, method); + if (unlikely(!unbound_method)) return -1; + Py_DECREF(method); + target->method = unbound_method; + } + } + return 0; +} + +/* CallUnboundCMethod0 */ +static PyObject* __Pyx__CallUnboundCMethod0(__Pyx_CachedCFunction* cfunc, PyObject* self) { + PyObject *args, *result = NULL; + if (unlikely(!cfunc->method) && unlikely(__Pyx_TryUnpackUnboundCMethod(cfunc) < 0)) return NULL; +#if CYTHON_ASSUME_SAFE_MACROS + args = PyTuple_New(1); + if (unlikely(!args)) goto bad; + Py_INCREF(self); + PyTuple_SET_ITEM(args, 0, self); +#else + args = PyTuple_Pack(1, self); + if (unlikely(!args)) goto bad; +#endif + result = __Pyx_PyObject_Call(cfunc->method, args, NULL); + Py_DECREF(args); +bad: + return result; +} + +/* PyObject_GenericGetAttrNoDict */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject *__Pyx_RaiseGenericGetAttributeError(PyTypeObject *tp, PyObject *attr_name) { + __Pyx_TypeName type_name = __Pyx_PyType_GetName(tp); + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", + type_name, attr_name); +#else + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", + type_name, PyString_AS_STRING(attr_name)); +#endif + __Pyx_DECREF_TypeName(type_name); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyObject_GenericGetAttrNoDict(PyObject* obj, PyObject* attr_name) { + PyObject *descr; + PyTypeObject *tp = Py_TYPE(obj); + if (unlikely(!PyString_Check(attr_name))) { + return 
PyObject_GenericGetAttr(obj, attr_name); + } + assert(!tp->tp_dictoffset); + descr = _PyType_Lookup(tp, attr_name); + if (unlikely(!descr)) { + return __Pyx_RaiseGenericGetAttributeError(tp, attr_name); + } + Py_INCREF(descr); + #if PY_MAJOR_VERSION < 3 + if (likely(PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_HAVE_CLASS))) + #endif + { + descrgetfunc f = Py_TYPE(descr)->tp_descr_get; + if (unlikely(f)) { + PyObject *res = f(descr, obj, (PyObject *)tp); + Py_DECREF(descr); + return res; + } + } + return descr; +} +#endif + +/* PyObject_GenericGetAttr */ +#if CYTHON_USE_TYPE_SLOTS && CYTHON_USE_PYTYPE_LOOKUP && PY_VERSION_HEX < 0x03070000 +static PyObject* __Pyx_PyObject_GenericGetAttr(PyObject* obj, PyObject* attr_name) { + if (unlikely(Py_TYPE(obj)->tp_dictoffset)) { + return PyObject_GenericGetAttr(obj, attr_name); + } + return __Pyx_PyObject_GenericGetAttrNoDict(obj, attr_name); +} +#endif + +/* FixUpExtensionType */ +#if CYTHON_USE_TYPE_SPECS +static int __Pyx_fix_up_extension_type_from_spec(PyType_Spec *spec, PyTypeObject *type) { +#if PY_VERSION_HEX > 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + CYTHON_UNUSED_VAR(spec); + CYTHON_UNUSED_VAR(type); +#else + const PyType_Slot *slot = spec->slots; + while (slot && slot->slot && slot->slot != Py_tp_members) + slot++; + if (slot && slot->slot == Py_tp_members) { + int changed = 0; +#if !(PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON) + const +#endif + PyMemberDef *memb = (PyMemberDef*) slot->pfunc; + while (memb && memb->name) { + if (memb->name[0] == '_' && memb->name[1] == '_') { +#if PY_VERSION_HEX < 0x030900b1 + if (strcmp(memb->name, "__weaklistoffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_weaklistoffset = memb->offset; + changed = 1; + } + else if (strcmp(memb->name, "__dictoffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); + type->tp_dictoffset = memb->offset; + changed = 1; + } +#if CYTHON_METH_FASTCALL + else if (strcmp(memb->name, "__vectorcalloffset__") == 0) { + assert(memb->type == T_PYSSIZET); + assert(memb->flags == READONLY); +#if PY_VERSION_HEX >= 0x030800b4 + type->tp_vectorcall_offset = memb->offset; +#else + type->tp_print = (printfunc) memb->offset; +#endif + changed = 1; + } +#endif +#else + if ((0)); +#endif +#if PY_VERSION_HEX <= 0x030900b1 && CYTHON_COMPILING_IN_CPYTHON + else if (strcmp(memb->name, "__module__") == 0) { + PyObject *descr; + assert(memb->type == T_OBJECT); + assert(memb->flags == 0 || memb->flags == READONLY); + descr = PyDescr_NewMember(type, memb); + if (unlikely(!descr)) + return -1; + if (unlikely(PyDict_SetItem(type->tp_dict, PyDescr_NAME(descr), descr) < 0)) { + Py_DECREF(descr); + return -1; + } + Py_DECREF(descr); + changed = 1; + } +#endif + } + memb++; + } + if (changed) + PyType_Modified(type); + } +#endif + return 0; +} +#endif + +/* PyObjectCallNoArg */ +static CYTHON_INLINE PyObject* __Pyx_PyObject_CallNoArg(PyObject *func) { + PyObject *arg = NULL; + return __Pyx_PyObject_FastCall(func, (&arg)+1, 0 | __Pyx_PY_VECTORCALL_ARGUMENTS_OFFSET); +} + +/* PyObjectGetMethod */ +static int __Pyx_PyObject_GetMethod(PyObject *obj, PyObject *name, PyObject **method) { + PyObject *attr; +#if CYTHON_UNPACK_METHODS && CYTHON_COMPILING_IN_CPYTHON && CYTHON_USE_PYTYPE_LOOKUP + __Pyx_TypeName type_name; + PyTypeObject *tp = Py_TYPE(obj); + PyObject *descr; + descrgetfunc f = NULL; + PyObject **dictptr, *dict; + int meth_found = 0; + assert (*method == NULL); + if (unlikely(tp->tp_getattro != 
PyObject_GenericGetAttr)) { + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; + } + if (unlikely(tp->tp_dict == NULL) && unlikely(PyType_Ready(tp) < 0)) { + return 0; + } + descr = _PyType_Lookup(tp, name); + if (likely(descr != NULL)) { + Py_INCREF(descr); +#if defined(Py_TPFLAGS_METHOD_DESCRIPTOR) && Py_TPFLAGS_METHOD_DESCRIPTOR + if (__Pyx_PyType_HasFeature(Py_TYPE(descr), Py_TPFLAGS_METHOD_DESCRIPTOR)) +#elif PY_MAJOR_VERSION >= 3 + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr) || __Pyx_IS_TYPE(descr, &PyMethodDescr_Type))) + #endif +#else + #ifdef __Pyx_CyFunction_USED + if (likely(PyFunction_Check(descr) || __Pyx_CyFunction_Check(descr))) + #else + if (likely(PyFunction_Check(descr))) + #endif +#endif + { + meth_found = 1; + } else { + f = Py_TYPE(descr)->tp_descr_get; + if (f != NULL && PyDescr_IsData(descr)) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + } + } + dictptr = _PyObject_GetDictPtr(obj); + if (dictptr != NULL && (dict = *dictptr) != NULL) { + Py_INCREF(dict); + attr = __Pyx_PyDict_GetItemStr(dict, name); + if (attr != NULL) { + Py_INCREF(attr); + Py_DECREF(dict); + Py_XDECREF(descr); + goto try_unpack; + } + Py_DECREF(dict); + } + if (meth_found) { + *method = descr; + return 1; + } + if (f != NULL) { + attr = f(descr, obj, (PyObject *)Py_TYPE(obj)); + Py_DECREF(descr); + goto try_unpack; + } + if (likely(descr != NULL)) { + *method = descr; + return 0; + } + type_name = __Pyx_PyType_GetName(tp); + PyErr_Format(PyExc_AttributeError, +#if PY_MAJOR_VERSION >= 3 + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%U'", + type_name, name); +#else + "'" __Pyx_FMT_TYPENAME "' object has no attribute '%.400s'", + type_name, PyString_AS_STRING(name)); +#endif + __Pyx_DECREF_TypeName(type_name); + return 0; +#else + attr = __Pyx_PyObject_GetAttrStr(obj, name); + goto try_unpack; +#endif +try_unpack: +#if CYTHON_UNPACK_METHODS + if (likely(attr) && PyMethod_Check(attr) && likely(PyMethod_GET_SELF(attr) == obj)) { + PyObject *function = PyMethod_GET_FUNCTION(attr); + Py_INCREF(function); + Py_DECREF(attr); + *method = function; + return 1; + } +#endif + *method = attr; + return 0; +} + +/* PyObjectCallMethod0 */ +static PyObject* __Pyx_PyObject_CallMethod0(PyObject* obj, PyObject* method_name) { + PyObject *method = NULL, *result = NULL; + int is_method = __Pyx_PyObject_GetMethod(obj, method_name, &method); + if (likely(is_method)) { + result = __Pyx_PyObject_CallOneArg(method, obj); + Py_DECREF(method); + return result; + } + if (unlikely(!method)) goto bad; + result = __Pyx_PyObject_CallNoArg(method); + Py_DECREF(method); +bad: + return result; +} + +/* ValidateBasesTuple */ +#if CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API || CYTHON_USE_TYPE_SPECS +static int __Pyx_validate_bases_tuple(const char *type_name, Py_ssize_t dictoffset, PyObject *bases) { + Py_ssize_t i, n; +#if CYTHON_ASSUME_SAFE_MACROS + n = PyTuple_GET_SIZE(bases); +#else + n = PyTuple_Size(bases); + if (n < 0) return -1; +#endif + for (i = 1; i < n; i++) + { +#if CYTHON_AVOID_BORROWED_REFS + PyObject *b0 = PySequence_GetItem(bases, i); + if (!b0) return -1; +#elif CYTHON_ASSUME_SAFE_MACROS + PyObject *b0 = PyTuple_GET_ITEM(bases, i); +#else + PyObject *b0 = PyTuple_GetItem(bases, i); + if (!b0) return -1; +#endif + PyTypeObject *b; +#if PY_MAJOR_VERSION < 3 + if (PyClass_Check(b0)) + { + 
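/* On Python 2, an old-style class is a plain PyClassObject rather than a type, so it can never provide the heap-type layout an extension type needs from a base; it is rejected here before the (PyTypeObject*) cast below. The enclosing loop starts at i = 1 because bases[0] becomes the primary tp_base and is handled separately. */ +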
PyErr_Format(PyExc_TypeError, "base class '%.200s' is an old-style class", + PyString_AS_STRING(((PyClassObject*)b0)->cl_name)); +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } +#endif + b = (PyTypeObject*) b0; + if (!__Pyx_PyType_HasFeature(b, Py_TPFLAGS_HEAPTYPE)) + { + __Pyx_TypeName b_name = __Pyx_PyType_GetName(b); + PyErr_Format(PyExc_TypeError, + "base class '" __Pyx_FMT_TYPENAME "' is not a heap type", b_name); + __Pyx_DECREF_TypeName(b_name); +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } +#if !CYTHON_USE_TYPE_SLOTS + if (dictoffset == 0) { + PyErr_Format(PyExc_TypeError, + "extension type '%s.200s': " + "unable to validate whether bases have a __dict__ " + "when CYTHON_USE_TYPE_SLOTS is off " + "(likely because you are building in the limited API). " + "Therefore, all extension types with multiple bases " + "must add 'cdef dict __dict__' in this compilation mode", + type_name); +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } +#else + if (dictoffset == 0 && b->tp_dictoffset) + { + __Pyx_TypeName b_name = __Pyx_PyType_GetName(b); + PyErr_Format(PyExc_TypeError, + "extension type '%.200s' has no __dict__ slot, " + "but base type '" __Pyx_FMT_TYPENAME "' has: " + "either add 'cdef dict __dict__' to the extension type " + "or add '__slots__ = [...]' to the base type", + type_name, b_name); + __Pyx_DECREF_TypeName(b_name); +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + return -1; + } +#endif +#if CYTHON_AVOID_BORROWED_REFS + Py_DECREF(b0); +#endif + } + return 0; +} +#endif + +/* PyType_Ready */ +static int __Pyx_PyType_Ready(PyTypeObject *t) { +#if CYTHON_USE_TYPE_SPECS || !(CYTHON_COMPILING_IN_CPYTHON || CYTHON_COMPILING_IN_LIMITED_API) || defined(PYSTON_MAJOR_VERSION) + (void)__Pyx_PyObject_CallMethod0; +#if CYTHON_USE_TYPE_SPECS + (void)__Pyx_validate_bases_tuple; +#endif + return PyType_Ready(t); +#else + int r; + PyObject *bases = __Pyx_PyType_GetSlot(t, tp_bases, PyObject*); + if (bases && unlikely(__Pyx_validate_bases_tuple(t->tp_name, t->tp_dictoffset, bases) == -1)) + return -1; +#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION) + { + int gc_was_enabled; + #if PY_VERSION_HEX >= 0x030A00b1 + gc_was_enabled = PyGC_Disable(); + (void)__Pyx_PyObject_CallMethod0; + #else + PyObject *ret, *py_status; + PyObject *gc = NULL; + #if PY_VERSION_HEX >= 0x030700a1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM+0 >= 0x07030400) + gc = PyImport_GetModule(__pyx_kp_u_gc); + #endif + if (unlikely(!gc)) gc = PyImport_Import(__pyx_kp_u_gc); + if (unlikely(!gc)) return -1; + py_status = __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_isenabled); + if (unlikely(!py_status)) { + Py_DECREF(gc); + return -1; + } + gc_was_enabled = __Pyx_PyObject_IsTrue(py_status); + Py_DECREF(py_status); + if (gc_was_enabled > 0) { + ret = __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_disable); + if (unlikely(!ret)) { + Py_DECREF(gc); + return -1; + } + Py_DECREF(ret); + } else if (unlikely(gc_was_enabled == -1)) { + Py_DECREF(gc); + return -1; + } + #endif + t->tp_flags |= Py_TPFLAGS_HEAPTYPE; +#if PY_VERSION_HEX >= 0x030A0000 + t->tp_flags |= Py_TPFLAGS_IMMUTABLETYPE; +#endif +#else + (void)__Pyx_PyObject_CallMethod0; +#endif + r = PyType_Ready(t); +#if PY_VERSION_HEX >= 0x03050000 && !defined(PYSTON_MAJOR_VERSION) + t->tp_flags &= ~Py_TPFLAGS_HEAPTYPE; + #if PY_VERSION_HEX >= 0x030A00b1 + if (gc_was_enabled) + PyGC_Enable(); + #else + if (gc_was_enabled) { + PyObject *tp, *v, *tb; + PyErr_Fetch(&tp, &v, &tb); + ret 
= __Pyx_PyObject_CallMethod0(gc, __pyx_kp_u_enable); + if (likely(ret || r == -1)) { + Py_XDECREF(ret); + PyErr_Restore(tp, v, tb); + } else { + Py_XDECREF(tp); + Py_XDECREF(v); + Py_XDECREF(tb); + r = -1; + } + } + Py_DECREF(gc); + #endif + } +#endif + return r; +#endif +} + +/* SetVTable */ +static int __Pyx_SetVtable(PyTypeObject *type, void *vtable) { + PyObject *ob = PyCapsule_New(vtable, 0, 0); + if (unlikely(!ob)) + goto bad; +#if CYTHON_COMPILING_IN_LIMITED_API + if (unlikely(PyObject_SetAttr((PyObject *) type, __pyx_n_s_pyx_vtable, ob) < 0)) +#else + if (unlikely(PyDict_SetItem(type->tp_dict, __pyx_n_s_pyx_vtable, ob) < 0)) +#endif + goto bad; + Py_DECREF(ob); + return 0; +bad: + Py_XDECREF(ob); + return -1; +} + +/* GetVTable */ +static void* __Pyx_GetVtable(PyTypeObject *type) { + void* ptr; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *ob = PyObject_GetAttr((PyObject *)type, __pyx_n_s_pyx_vtable); +#else + PyObject *ob = PyObject_GetItem(type->tp_dict, __pyx_n_s_pyx_vtable); +#endif + if (!ob) + goto bad; + ptr = PyCapsule_GetPointer(ob, 0); + if (!ptr && !PyErr_Occurred()) + PyErr_SetString(PyExc_RuntimeError, "invalid vtable found for imported type"); + Py_DECREF(ob); + return ptr; +bad: + Py_XDECREF(ob); + return NULL; +} + +/* MergeVTables */ +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __Pyx_MergeVtables(PyTypeObject *type) { + int i; + void** base_vtables; + __Pyx_TypeName tp_base_name; + __Pyx_TypeName base_name; + void* unknown = (void*)-1; + PyObject* bases = type->tp_bases; + int base_depth = 0; + { + PyTypeObject* base = type->tp_base; + while (base) { + base_depth += 1; + base = base->tp_base; + } + } + base_vtables = (void**) malloc(sizeof(void*) * (size_t)(base_depth + 1)); + base_vtables[0] = unknown; + for (i = 1; i < PyTuple_GET_SIZE(bases); i++) { + void* base_vtable = __Pyx_GetVtable(((PyTypeObject*)PyTuple_GET_ITEM(bases, i))); + if (base_vtable != NULL) { + int j; + PyTypeObject* base = type->tp_base; + for (j = 0; j < base_depth; j++) { + if (base_vtables[j] == unknown) { + base_vtables[j] = __Pyx_GetVtable(base); + base_vtables[j + 1] = unknown; + } + if (base_vtables[j] == base_vtable) { + break; + } else if (base_vtables[j] == NULL) { + goto bad; + } + base = base->tp_base; + } + } + } + PyErr_Clear(); + free(base_vtables); + return 0; +bad: + tp_base_name = __Pyx_PyType_GetName(type->tp_base); + base_name = __Pyx_PyType_GetName((PyTypeObject*)PyTuple_GET_ITEM(bases, i)); + PyErr_Format(PyExc_TypeError, + "multiple bases have vtable conflict: '" __Pyx_FMT_TYPENAME "' and '" __Pyx_FMT_TYPENAME "'", tp_base_name, base_name); + __Pyx_DECREF_TypeName(tp_base_name); + __Pyx_DECREF_TypeName(base_name); + free(base_vtables); + return -1; +} +#endif + +/* SetupReduce */ +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __Pyx_setup_reduce_is_named(PyObject* meth, PyObject* name) { + int ret; + PyObject *name_attr; + name_attr = __Pyx_PyObject_GetAttrStrNoError(meth, __pyx_n_s_name_2); + if (likely(name_attr)) { + ret = PyObject_RichCompareBool(name_attr, name, Py_EQ); + } else { + ret = -1; + } + if (unlikely(ret < 0)) { + PyErr_Clear(); + ret = 0; + } + Py_XDECREF(name_attr); + return ret; +} +static int __Pyx_setup_reduce(PyObject* type_obj) { + int ret = 0; + PyObject *object_reduce = NULL; + PyObject *object_getstate = NULL; + PyObject *object_reduce_ex = NULL; + PyObject *reduce = NULL; + PyObject *reduce_ex = NULL; + PyObject *reduce_cython = NULL; + PyObject *setstate = NULL; + PyObject *setstate_cython = NULL; + PyObject *getstate = NULL; +#if 
CYTHON_USE_PYTYPE_LOOKUP + getstate = _PyType_Lookup((PyTypeObject*)type_obj, __pyx_n_s_getstate); +#else + getstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_getstate); + if (!getstate && PyErr_Occurred()) { + goto __PYX_BAD; + } +#endif + if (getstate) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_getstate = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_getstate); +#else + object_getstate = __Pyx_PyObject_GetAttrStrNoError((PyObject*)&PyBaseObject_Type, __pyx_n_s_getstate); + if (!object_getstate && PyErr_Occurred()) { + goto __PYX_BAD; + } +#endif + if (object_getstate != getstate) { + goto __PYX_GOOD; + } + } +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce_ex = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#else + object_reduce_ex = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce_ex); if (!object_reduce_ex) goto __PYX_BAD; +#endif + reduce_ex = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce_ex); if (unlikely(!reduce_ex)) goto __PYX_BAD; + if (reduce_ex == object_reduce_ex) { +#if CYTHON_USE_PYTYPE_LOOKUP + object_reduce = _PyType_Lookup(&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#else + object_reduce = __Pyx_PyObject_GetAttrStr((PyObject*)&PyBaseObject_Type, __pyx_n_s_reduce); if (!object_reduce) goto __PYX_BAD; +#endif + reduce = __Pyx_PyObject_GetAttrStr(type_obj, __pyx_n_s_reduce); if (unlikely(!reduce)) goto __PYX_BAD; + if (reduce == object_reduce || __Pyx_setup_reduce_is_named(reduce, __pyx_n_s_reduce_cython)) { + reduce_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_reduce_cython); + if (likely(reduce_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce, reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_reduce_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (reduce == object_reduce || PyErr_Occurred()) { + goto __PYX_BAD; + } + setstate = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate); + if (!setstate) PyErr_Clear(); + if (!setstate || __Pyx_setup_reduce_is_named(setstate, __pyx_n_s_setstate_cython)) { + setstate_cython = __Pyx_PyObject_GetAttrStrNoError(type_obj, __pyx_n_s_setstate_cython); + if (likely(setstate_cython)) { + ret = PyDict_SetItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate, setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + ret = PyDict_DelItem(((PyTypeObject*)type_obj)->tp_dict, __pyx_n_s_setstate_cython); if (unlikely(ret < 0)) goto __PYX_BAD; + } else if (!setstate || PyErr_Occurred()) { + goto __PYX_BAD; + } + } + PyType_Modified((PyTypeObject*)type_obj); + } + } + goto __PYX_GOOD; +__PYX_BAD: + if (!PyErr_Occurred()) { + __Pyx_TypeName type_obj_name = + __Pyx_PyType_GetName((PyTypeObject*)type_obj); + PyErr_Format(PyExc_RuntimeError, + "Unable to initialize pickling for " __Pyx_FMT_TYPENAME, type_obj_name); + __Pyx_DECREF_TypeName(type_obj_name); + } + ret = -1; +__PYX_GOOD: +#if !CYTHON_USE_PYTYPE_LOOKUP + Py_XDECREF(object_reduce); + Py_XDECREF(object_reduce_ex); + Py_XDECREF(object_getstate); + Py_XDECREF(getstate); +#endif + Py_XDECREF(reduce); + Py_XDECREF(reduce_ex); + Py_XDECREF(reduce_cython); + Py_XDECREF(setstate); + Py_XDECREF(setstate_cython); + return ret; +} +#endif + +/* TypeImport */ +#ifndef __PYX_HAVE_RT_ImportType_3_0_2 +#define __PYX_HAVE_RT_ImportType_3_0_2 +static PyTypeObject *__Pyx_ImportType_3_0_2(PyObject *module, const char *module_name, const 
char *class_name, + size_t size, size_t alignment, enum __Pyx_ImportType_CheckSize_3_0_2 check_size) +{ + PyObject *result = 0; + char warning[200]; + Py_ssize_t basicsize; + Py_ssize_t itemsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_basicsize; + PyObject *py_itemsize; +#endif + result = PyObject_GetAttrString(module, class_name); + if (!result) + goto bad; + if (!PyType_Check(result)) { + PyErr_Format(PyExc_TypeError, + "%.200s.%.200s is not a type object", + module_name, class_name); + goto bad; + } +#if !CYTHON_COMPILING_IN_LIMITED_API + basicsize = ((PyTypeObject *)result)->tp_basicsize; + itemsize = ((PyTypeObject *)result)->tp_itemsize; +#else + py_basicsize = PyObject_GetAttrString(result, "__basicsize__"); + if (!py_basicsize) + goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (basicsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; + py_itemsize = PyObject_GetAttrString(result, "__itemsize__"); + if (!py_itemsize) + goto bad; + itemsize = PyLong_AsSsize_t(py_itemsize); + Py_DECREF(py_itemsize); + py_itemsize = 0; + if (itemsize == (Py_ssize_t)-1 && PyErr_Occurred()) + goto bad; +#endif + if (itemsize) { + if (size % alignment) { + alignment = size % alignment; + } + if (itemsize < (Py_ssize_t)alignment) + itemsize = (Py_ssize_t)alignment; + } + if ((size_t)(basicsize + itemsize) < size) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize+itemsize); + goto bad; + } + if (check_size == __Pyx_ImportType_CheckSize_Error_3_0_2 && + ((size_t)basicsize > size || (size_t)(basicsize + itemsize) < size)) { + PyErr_Format(PyExc_ValueError, + "%.200s.%.200s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd-%zd from PyObject", + module_name, class_name, size, basicsize, basicsize+itemsize); + goto bad; + } + else if (check_size == __Pyx_ImportType_CheckSize_Warn_3_0_2 && (size_t)basicsize > size) { + PyOS_snprintf(warning, sizeof(warning), + "%s.%s size changed, may indicate binary incompatibility. " + "Expected %zd from C header, got %zd from PyObject", + module_name, class_name, size, basicsize); + if (PyErr_WarnEx(NULL, warning, 0) < 0) goto bad; + } + return (PyTypeObject *)result; +bad: + Py_XDECREF(result); + return NULL; +} +#endif + +/* FetchSharedCythonModule */ +static PyObject *__Pyx_FetchSharedCythonABIModule(void) { + PyObject *abi_module = PyImport_AddModule((char*) __PYX_ABI_MODULE_NAME); + if (unlikely(!abi_module)) return NULL; + Py_INCREF(abi_module); + return abi_module; +} + +/* FetchCommonType */ +static int __Pyx_VerifyCachedType(PyObject *cached_type, + const char *name, + Py_ssize_t basicsize, + Py_ssize_t expected_basicsize) { + if (!PyType_Check(cached_type)) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s is not a type object", name); + return -1; + } + if (basicsize != expected_basicsize) { + PyErr_Format(PyExc_TypeError, + "Shared Cython type %.200s has the wrong size, try recompiling", + name); + return -1; + } + return 0; +} +#if !CYTHON_USE_TYPE_SPECS +static PyTypeObject* __Pyx_FetchCommonType(PyTypeObject* type) { + PyObject* abi_module; + const char* object_name; + PyTypeObject *cached_type = NULL; + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) return NULL; + object_name = strrchr(type->tp_name, '.'); + object_name = object_name ? 
object_name+1 : type->tp_name; + cached_type = (PyTypeObject*) PyObject_GetAttrString(abi_module, object_name); + if (cached_type) { + if (__Pyx_VerifyCachedType( + (PyObject *)cached_type, + object_name, + cached_type->tp_basicsize, + type->tp_basicsize) < 0) { + goto bad; + } + goto done; + } + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + if (PyType_Ready(type) < 0) goto bad; + if (PyObject_SetAttrString(abi_module, object_name, (PyObject *)type) < 0) + goto bad; + Py_INCREF(type); + cached_type = type; +done: + Py_DECREF(abi_module); + return cached_type; +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} +#else +static PyTypeObject *__Pyx_FetchCommonTypeFromSpec(PyObject *module, PyType_Spec *spec, PyObject *bases) { + PyObject *abi_module, *cached_type = NULL; + const char* object_name = strrchr(spec->name, '.'); + object_name = object_name ? object_name+1 : spec->name; + abi_module = __Pyx_FetchSharedCythonABIModule(); + if (!abi_module) return NULL; + cached_type = PyObject_GetAttrString(abi_module, object_name); + if (cached_type) { + Py_ssize_t basicsize; +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *py_basicsize; + py_basicsize = PyObject_GetAttrString(cached_type, "__basicsize__"); + if (unlikely(!py_basicsize)) goto bad; + basicsize = PyLong_AsSsize_t(py_basicsize); + Py_DECREF(py_basicsize); + py_basicsize = 0; + if (unlikely(basicsize == (Py_ssize_t)-1) && PyErr_Occurred()) goto bad; +#else + basicsize = likely(PyType_Check(cached_type)) ? ((PyTypeObject*) cached_type)->tp_basicsize : -1; +#endif + if (__Pyx_VerifyCachedType( + cached_type, + object_name, + basicsize, + spec->basicsize) < 0) { + goto bad; + } + goto done; + } + if (!PyErr_ExceptionMatches(PyExc_AttributeError)) goto bad; + PyErr_Clear(); + CYTHON_UNUSED_VAR(module); + cached_type = __Pyx_PyType_FromModuleAndSpec(abi_module, spec, bases); + if (unlikely(!cached_type)) goto bad; + if (unlikely(__Pyx_fix_up_extension_type_from_spec(spec, (PyTypeObject *) cached_type) < 0)) goto bad; + if (PyObject_SetAttrString(abi_module, object_name, cached_type) < 0) goto bad; +done: + Py_DECREF(abi_module); + assert(cached_type == NULL || PyType_Check(cached_type)); + return (PyTypeObject *) cached_type; +bad: + Py_XDECREF(cached_type); + cached_type = NULL; + goto done; +} +#endif + +/* PyVectorcallFastCallDict */ +#if CYTHON_METH_FASTCALL +static PyObject *__Pyx_PyVectorcall_FastCallDict_kw(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) +{ + PyObject *res = NULL; + PyObject *kwnames; + PyObject **newargs; + PyObject **kwvalues; + Py_ssize_t i, pos; + size_t j; + PyObject *key, *value; + unsigned long keys_are_strings; + Py_ssize_t nkw = PyDict_GET_SIZE(kw); + newargs = (PyObject **)PyMem_Malloc((nargs + (size_t)nkw) * sizeof(args[0])); + if (unlikely(newargs == NULL)) { + PyErr_NoMemory(); + return NULL; + } + for (j = 0; j < nargs; j++) newargs[j] = args[j]; + kwnames = PyTuple_New(nkw); + if (unlikely(kwnames == NULL)) { + PyMem_Free(newargs); + return NULL; + } + kwvalues = newargs + nargs; + pos = i = 0; + keys_are_strings = Py_TPFLAGS_UNICODE_SUBCLASS; + while (PyDict_Next(kw, &pos, &key, &value)) { + keys_are_strings &= Py_TYPE(key)->tp_flags; + Py_INCREF(key); + Py_INCREF(value); + PyTuple_SET_ITEM(kwnames, i, key); + kwvalues[i] = value; + i++; + } + if (unlikely(!keys_are_strings)) { + PyErr_SetString(PyExc_TypeError, "keywords must be strings"); + goto cleanup; + } + res = vc(func, newargs, nargs, kwnames); 
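+ /* Call through the vectorcall slot: positional args are followed in newargs by the flattened keyword values, and kwnames carries the matching keys; the cleanup path below runs whether or not vc() succeeded. */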
+cleanup: + Py_DECREF(kwnames); + for (i = 0; i < nkw; i++) + Py_DECREF(kwvalues[i]); + PyMem_Free(newargs); + return res; +} +static CYTHON_INLINE PyObject *__Pyx_PyVectorcall_FastCallDict(PyObject *func, __pyx_vectorcallfunc vc, PyObject *const *args, size_t nargs, PyObject *kw) +{ + if (likely(kw == NULL) || PyDict_GET_SIZE(kw) == 0) { + return vc(func, args, nargs, NULL); + } + return __Pyx_PyVectorcall_FastCallDict_kw(func, vc, args, nargs, kw); +} +#endif + +/* CythonFunctionShared */ +static CYTHON_INLINE void __Pyx__CyFunction_SetClassObj(__pyx_CyFunctionObject* f, PyObject* classobj) { +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + __Pyx_Py_XDECREF_SET( + __Pyx_CyFunction_GetClassObj(f), + ((classobj) ? __Pyx_NewRef(classobj) : NULL)); +#else + __Pyx_Py_XDECREF_SET( + ((PyCMethodObject *) (f))->mm_class, + (PyTypeObject*)((classobj) ? __Pyx_NewRef(classobj) : NULL)); +#endif +} +static PyObject * +__Pyx_CyFunction_get_doc(__pyx_CyFunctionObject *op, void *closure) +{ + CYTHON_UNUSED_VAR(closure); + if (unlikely(op->func_doc == NULL)) { +#if CYTHON_COMPILING_IN_LIMITED_API + op->func_doc = PyObject_GetAttrString(op->func, "__doc__"); + if (unlikely(!op->func_doc)) return NULL; +#else + if (((PyCFunctionObject*)op)->m_ml->ml_doc) { +#if PY_MAJOR_VERSION >= 3 + op->func_doc = PyUnicode_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); +#else + op->func_doc = PyString_FromString(((PyCFunctionObject*)op)->m_ml->ml_doc); +#endif + if (unlikely(op->func_doc == NULL)) + return NULL; + } else { + Py_INCREF(Py_None); + return Py_None; + } +#endif + } + Py_INCREF(op->func_doc); + return op->func_doc; +} +static int +__Pyx_CyFunction_set_doc(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (value == NULL) { + value = Py_None; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_doc, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_name(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(op->func_name == NULL)) { +#if CYTHON_COMPILING_IN_LIMITED_API + op->func_name = PyObject_GetAttrString(op->func, "__name__"); +#elif PY_MAJOR_VERSION >= 3 + op->func_name = PyUnicode_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); +#else + op->func_name = PyString_InternFromString(((PyCFunctionObject*)op)->m_ml->ml_name); +#endif + if (unlikely(op->func_name == NULL)) + return NULL; + } + Py_INCREF(op->func_name); + return op->func_name; +} +static int +__Pyx_CyFunction_set_name(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__name__ must be set to a string object"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_name, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_qualname(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + Py_INCREF(op->func_qualname); + return op->func_qualname; +} +static int +__Pyx_CyFunction_set_qualname(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); +#if PY_MAJOR_VERSION >= 3 + if (unlikely(value == NULL || !PyUnicode_Check(value))) +#else + if (unlikely(value == NULL || !PyString_Check(value))) +#endif + { + PyErr_SetString(PyExc_TypeError, + "__qualname__ must be set to a string object"); + 
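+ /* Rejecting the assignment leaves the existing __qualname__ in place. */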
return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_qualname, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_dict(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(op->func_dict == NULL)) { + op->func_dict = PyDict_New(); + if (unlikely(op->func_dict == NULL)) + return NULL; + } + Py_INCREF(op->func_dict); + return op->func_dict; +} +static int +__Pyx_CyFunction_set_dict(__pyx_CyFunctionObject *op, PyObject *value, void *context) +{ + CYTHON_UNUSED_VAR(context); + if (unlikely(value == NULL)) { + PyErr_SetString(PyExc_TypeError, + "function's dictionary may not be deleted"); + return -1; + } + if (unlikely(!PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "setting function's dictionary to a non-dict"); + return -1; + } + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->func_dict, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_globals(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(context); + Py_INCREF(op->func_globals); + return op->func_globals; +} +static PyObject * +__Pyx_CyFunction_get_closure(__pyx_CyFunctionObject *op, void *context) +{ + CYTHON_UNUSED_VAR(op); + CYTHON_UNUSED_VAR(context); + Py_INCREF(Py_None); + return Py_None; +} +static PyObject * +__Pyx_CyFunction_get_code(__pyx_CyFunctionObject *op, void *context) +{ + PyObject* result = (op->func_code) ? op->func_code : Py_None; + CYTHON_UNUSED_VAR(context); + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_init_defaults(__pyx_CyFunctionObject *op) { + int result = 0; + PyObject *res = op->defaults_getter((PyObject *) op); + if (unlikely(!res)) + return -1; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + op->defaults_tuple = PyTuple_GET_ITEM(res, 0); + Py_INCREF(op->defaults_tuple); + op->defaults_kwdict = PyTuple_GET_ITEM(res, 1); + Py_INCREF(op->defaults_kwdict); + #else + op->defaults_tuple = __Pyx_PySequence_ITEM(res, 0); + if (unlikely(!op->defaults_tuple)) result = -1; + else { + op->defaults_kwdict = __Pyx_PySequence_ITEM(res, 1); + if (unlikely(!op->defaults_kwdict)) result = -1; + } + #endif + Py_DECREF(res); + return result; +} +static int +__Pyx_CyFunction_set_defaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value) { + value = Py_None; + } else if (unlikely(value != Py_None && !PyTuple_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__defaults__ must be set to a tuple object"); + return -1; + } + PyErr_WarnEx(PyExc_RuntimeWarning, "changes to cyfunction.__defaults__ will not " + "currently affect the values used in function calls", 1); + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->defaults_tuple, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_defaults(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->defaults_tuple; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + if (op->defaults_getter) { + if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; + result = op->defaults_tuple; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_set_kwdefaults(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value) { + value = Py_None; + } else if (unlikely(value != Py_None && !PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__kwdefaults__ must be set to a dict object"); + return -1; + } + PyErr_WarnEx(PyExc_RuntimeWarning, "changes to 
cyfunction.__kwdefaults__ will not " + "currently affect the values used in function calls", 1); + Py_INCREF(value); + __Pyx_Py_XDECREF_SET(op->defaults_kwdict, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_kwdefaults(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->defaults_kwdict; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + if (op->defaults_getter) { + if (unlikely(__Pyx_CyFunction_init_defaults(op) < 0)) return NULL; + result = op->defaults_kwdict; + } else { + result = Py_None; + } + } + Py_INCREF(result); + return result; +} +static int +__Pyx_CyFunction_set_annotations(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + if (!value || value == Py_None) { + value = NULL; + } else if (unlikely(!PyDict_Check(value))) { + PyErr_SetString(PyExc_TypeError, + "__annotations__ must be set to a dict object"); + return -1; + } + Py_XINCREF(value); + __Pyx_Py_XDECREF_SET(op->func_annotations, value); + return 0; +} +static PyObject * +__Pyx_CyFunction_get_annotations(__pyx_CyFunctionObject *op, void *context) { + PyObject* result = op->func_annotations; + CYTHON_UNUSED_VAR(context); + if (unlikely(!result)) { + result = PyDict_New(); + if (unlikely(!result)) return NULL; + op->func_annotations = result; + } + Py_INCREF(result); + return result; +} +static PyObject * +__Pyx_CyFunction_get_is_coroutine(__pyx_CyFunctionObject *op, void *context) { + int is_coroutine; + CYTHON_UNUSED_VAR(context); + if (op->func_is_coroutine) { + return __Pyx_NewRef(op->func_is_coroutine); + } + is_coroutine = op->flags & __Pyx_CYFUNCTION_COROUTINE; +#if PY_VERSION_HEX >= 0x03050000 + if (is_coroutine) { + PyObject *module, *fromlist, *marker = __pyx_n_s_is_coroutine; + fromlist = PyList_New(1); + if (unlikely(!fromlist)) return NULL; + Py_INCREF(marker); +#if CYTHON_ASSUME_SAFE_MACROS + PyList_SET_ITEM(fromlist, 0, marker); +#else + if (unlikely(PyList_SetItem(fromlist, 0, marker) < 0)) { + Py_DECREF(marker); + Py_DECREF(fromlist); + return NULL; + } +#endif + module = PyImport_ImportModuleLevelObject(__pyx_n_s_asyncio_coroutines, NULL, NULL, fromlist, 0); + Py_DECREF(fromlist); + if (unlikely(!module)) goto ignore; + op->func_is_coroutine = __Pyx_PyObject_GetAttrStr(module, marker); + Py_DECREF(module); + if (likely(op->func_is_coroutine)) { + return __Pyx_NewRef(op->func_is_coroutine); + } +ignore: + PyErr_Clear(); + } +#endif + op->func_is_coroutine = __Pyx_PyBool_FromLong(is_coroutine); + return __Pyx_NewRef(op->func_is_coroutine); +} +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject * +__Pyx_CyFunction_get_module(__pyx_CyFunctionObject *op, void *context) { + CYTHON_UNUSED_VAR(context); + return PyObject_GetAttrString(op->func, "__module__"); +} +static int +__Pyx_CyFunction_set_module(__pyx_CyFunctionObject *op, PyObject* value, void *context) { + CYTHON_UNUSED_VAR(context); + return PyObject_SetAttrString(op->func, "__module__", value); +} +#endif +static PyGetSetDef __pyx_CyFunction_getsets[] = { + {(char *) "func_doc", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {(char *) "__doc__", (getter)__Pyx_CyFunction_get_doc, (setter)__Pyx_CyFunction_set_doc, 0, 0}, + {(char *) "func_name", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {(char *) "__name__", (getter)__Pyx_CyFunction_get_name, (setter)__Pyx_CyFunction_set_name, 0, 0}, + {(char *) "__qualname__", (getter)__Pyx_CyFunction_get_qualname, (setter)__Pyx_CyFunction_set_qualname, 0, 0}, + {(char 
*) "func_dict", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, + {(char *) "__dict__", (getter)__Pyx_CyFunction_get_dict, (setter)__Pyx_CyFunction_set_dict, 0, 0}, + {(char *) "func_globals", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {(char *) "__globals__", (getter)__Pyx_CyFunction_get_globals, 0, 0, 0}, + {(char *) "func_closure", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {(char *) "__closure__", (getter)__Pyx_CyFunction_get_closure, 0, 0, 0}, + {(char *) "func_code", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {(char *) "__code__", (getter)__Pyx_CyFunction_get_code, 0, 0, 0}, + {(char *) "func_defaults", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {(char *) "__defaults__", (getter)__Pyx_CyFunction_get_defaults, (setter)__Pyx_CyFunction_set_defaults, 0, 0}, + {(char *) "__kwdefaults__", (getter)__Pyx_CyFunction_get_kwdefaults, (setter)__Pyx_CyFunction_set_kwdefaults, 0, 0}, + {(char *) "__annotations__", (getter)__Pyx_CyFunction_get_annotations, (setter)__Pyx_CyFunction_set_annotations, 0, 0}, + {(char *) "_is_coroutine", (getter)__Pyx_CyFunction_get_is_coroutine, 0, 0, 0}, +#if CYTHON_COMPILING_IN_LIMITED_API + {"__module__", (getter)__Pyx_CyFunction_get_module, (setter)__Pyx_CyFunction_set_module, 0, 0}, +#endif + {0, 0, 0, 0, 0} +}; +static PyMemberDef __pyx_CyFunction_members[] = { +#if !CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__module__", T_OBJECT, offsetof(PyCFunctionObject, m_module), 0, 0}, +#endif +#if CYTHON_USE_TYPE_SPECS + {(char *) "__dictoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_dict), READONLY, 0}, +#if CYTHON_METH_FASTCALL +#if CYTHON_BACKPORT_VECTORCALL + {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_vectorcall), READONLY, 0}, +#else +#if !CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__vectorcalloffset__", T_PYSSIZET, offsetof(PyCFunctionObject, vectorcall), READONLY, 0}, +#endif +#endif +#endif +#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(__pyx_CyFunctionObject, func_weakreflist), READONLY, 0}, +#else + {(char *) "__weaklistoffset__", T_PYSSIZET, offsetof(PyCFunctionObject, m_weakreflist), READONLY, 0}, +#endif +#endif + {0, 0, 0, 0, 0} +}; +static PyObject * +__Pyx_CyFunction_reduce(__pyx_CyFunctionObject *m, PyObject *args) +{ + CYTHON_UNUSED_VAR(args); +#if PY_MAJOR_VERSION >= 3 + Py_INCREF(m->func_qualname); + return m->func_qualname; +#else + return PyString_FromString(((PyCFunctionObject*)m)->m_ml->ml_name); +#endif +} +static PyMethodDef __pyx_CyFunction_methods[] = { + {"__reduce__", (PyCFunction)__Pyx_CyFunction_reduce, METH_VARARGS, 0}, + {0, 0, 0, 0} +}; +#if PY_VERSION_HEX < 0x030500A0 || CYTHON_COMPILING_IN_LIMITED_API +#define __Pyx_CyFunction_weakreflist(cyfunc) ((cyfunc)->func_weakreflist) +#else +#define __Pyx_CyFunction_weakreflist(cyfunc) (((PyCFunctionObject*)cyfunc)->m_weakreflist) +#endif +static PyObject *__Pyx_CyFunction_Init(__pyx_CyFunctionObject *op, PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { +#if !CYTHON_COMPILING_IN_LIMITED_API + PyCFunctionObject *cf = (PyCFunctionObject*) op; +#endif + if (unlikely(op == NULL)) + return NULL; +#if CYTHON_COMPILING_IN_LIMITED_API + op->func = PyCFunction_NewEx(ml, (PyObject*)op, module); + if (unlikely(!op->func)) return NULL; +#endif + op->flags = flags; + __Pyx_CyFunction_weakreflist(op) = NULL; 
+#if !CYTHON_COMPILING_IN_LIMITED_API + cf->m_ml = ml; + cf->m_self = (PyObject *) op; +#endif + Py_XINCREF(closure); + op->func_closure = closure; +#if !CYTHON_COMPILING_IN_LIMITED_API + Py_XINCREF(module); + cf->m_module = module; +#endif + op->func_dict = NULL; + op->func_name = NULL; + Py_INCREF(qualname); + op->func_qualname = qualname; + op->func_doc = NULL; +#if PY_VERSION_HEX < 0x030900B1 || CYTHON_COMPILING_IN_LIMITED_API + op->func_classobj = NULL; +#else + ((PyCMethodObject*)op)->mm_class = NULL; +#endif + op->func_globals = globals; + Py_INCREF(op->func_globals); + Py_XINCREF(code); + op->func_code = code; + op->defaults_pyobjects = 0; + op->defaults_size = 0; + op->defaults = NULL; + op->defaults_tuple = NULL; + op->defaults_kwdict = NULL; + op->defaults_getter = NULL; + op->func_annotations = NULL; + op->func_is_coroutine = NULL; +#if CYTHON_METH_FASTCALL + switch (ml->ml_flags & (METH_VARARGS | METH_FASTCALL | METH_NOARGS | METH_O | METH_KEYWORDS | METH_METHOD)) { + case METH_NOARGS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_NOARGS; + break; + case METH_O: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_O; + break; + case METH_METHOD | METH_FASTCALL | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD; + break; + case METH_FASTCALL | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS; + break; + case METH_VARARGS | METH_KEYWORDS: + __Pyx_CyFunction_func_vectorcall(op) = NULL; + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); + Py_DECREF(op); + return NULL; + } +#endif + return (PyObject *) op; +} +static int +__Pyx_CyFunction_clear(__pyx_CyFunctionObject *m) +{ + Py_CLEAR(m->func_closure); +#if CYTHON_COMPILING_IN_LIMITED_API + Py_CLEAR(m->func); +#else + Py_CLEAR(((PyCFunctionObject*)m)->m_module); +#endif + Py_CLEAR(m->func_dict); + Py_CLEAR(m->func_name); + Py_CLEAR(m->func_qualname); + Py_CLEAR(m->func_doc); + Py_CLEAR(m->func_globals); + Py_CLEAR(m->func_code); +#if !CYTHON_COMPILING_IN_LIMITED_API +#if PY_VERSION_HEX < 0x030900B1 + Py_CLEAR(__Pyx_CyFunction_GetClassObj(m)); +#else + { + PyObject *cls = (PyObject*) ((PyCMethodObject *) (m))->mm_class; + ((PyCMethodObject *) (m))->mm_class = NULL; + Py_XDECREF(cls); + } +#endif +#endif + Py_CLEAR(m->defaults_tuple); + Py_CLEAR(m->defaults_kwdict); + Py_CLEAR(m->func_annotations); + Py_CLEAR(m->func_is_coroutine); + if (m->defaults) { + PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); + int i; + for (i = 0; i < m->defaults_pyobjects; i++) + Py_XDECREF(pydefaults[i]); + PyObject_Free(m->defaults); + m->defaults = NULL; + } + return 0; +} +static void __Pyx__CyFunction_dealloc(__pyx_CyFunctionObject *m) +{ + if (__Pyx_CyFunction_weakreflist(m) != NULL) + PyObject_ClearWeakRefs((PyObject *) m); + __Pyx_CyFunction_clear(m); + __Pyx_PyHeapTypeObject_GC_Del(m); +} +static void __Pyx_CyFunction_dealloc(__pyx_CyFunctionObject *m) +{ + PyObject_GC_UnTrack(m); + __Pyx__CyFunction_dealloc(m); +} +static int __Pyx_CyFunction_traverse(__pyx_CyFunctionObject *m, visitproc visit, void *arg) +{ + Py_VISIT(m->func_closure); +#if CYTHON_COMPILING_IN_LIMITED_API + Py_VISIT(m->func); +#else + Py_VISIT(((PyCFunctionObject*)m)->m_module); +#endif + Py_VISIT(m->func_dict); + Py_VISIT(m->func_name); + Py_VISIT(m->func_qualname); + Py_VISIT(m->func_doc); + Py_VISIT(m->func_globals); + Py_VISIT(m->func_code); +#if 
!CYTHON_COMPILING_IN_LIMITED_API + Py_VISIT(__Pyx_CyFunction_GetClassObj(m)); +#endif + Py_VISIT(m->defaults_tuple); + Py_VISIT(m->defaults_kwdict); + Py_VISIT(m->func_is_coroutine); + if (m->defaults) { + PyObject **pydefaults = __Pyx_CyFunction_Defaults(PyObject *, m); + int i; + for (i = 0; i < m->defaults_pyobjects; i++) + Py_VISIT(pydefaults[i]); + } + return 0; +} +static PyObject* +__Pyx_CyFunction_repr(__pyx_CyFunctionObject *op) +{ +#if PY_MAJOR_VERSION >= 3 + return PyUnicode_FromFormat("<cyfunction %U at %p>", + op->func_qualname, (void *)op); +#else + return PyString_FromFormat("<cyfunction %s at %p>", + PyString_AsString(op->func_qualname), (void *)op); +#endif +} +static PyObject * __Pyx_CyFunction_CallMethod(PyObject *func, PyObject *self, PyObject *arg, PyObject *kw) { +#if CYTHON_COMPILING_IN_LIMITED_API + PyObject *f = ((__pyx_CyFunctionObject*)func)->func; + PyObject *py_name = NULL; + PyCFunction meth; + int flags; + meth = PyCFunction_GetFunction(f); + if (unlikely(!meth)) return NULL; + flags = PyCFunction_GetFlags(f); + if (unlikely(flags < 0)) return NULL; +#else + PyCFunctionObject* f = (PyCFunctionObject*)func; + PyCFunction meth = f->m_ml->ml_meth; + int flags = f->m_ml->ml_flags; +#endif + Py_ssize_t size; + switch (flags & (METH_VARARGS | METH_KEYWORDS | METH_NOARGS | METH_O)) { + case METH_VARARGS: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) + return (*meth)(self, arg); + break; + case METH_VARARGS | METH_KEYWORDS: + return (*(PyCFunctionWithKeywords)(void*)meth)(self, arg, kw); + case METH_NOARGS: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { +#if CYTHON_ASSUME_SAFE_MACROS + size = PyTuple_GET_SIZE(arg); +#else + size = PyTuple_Size(arg); + if (unlikely(size < 0)) return NULL; +#endif + if (likely(size == 0)) + return (*meth)(self, NULL); +#if CYTHON_COMPILING_IN_LIMITED_API + py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL); + if (!py_name) return NULL; + PyErr_Format(PyExc_TypeError, + "%.200S() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + py_name, size); + Py_DECREF(py_name); +#else + PyErr_Format(PyExc_TypeError, + "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + f->m_ml->ml_name, size); +#endif + return NULL; + } + break; + case METH_O: + if (likely(kw == NULL || PyDict_Size(kw) == 0)) { +#if CYTHON_ASSUME_SAFE_MACROS + size = PyTuple_GET_SIZE(arg); +#else + size = PyTuple_Size(arg); + if (unlikely(size < 0)) return NULL; +#endif + if (likely(size == 1)) { + PyObject *result, *arg0; + #if CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS + arg0 = PyTuple_GET_ITEM(arg, 0); + #else + arg0 = __Pyx_PySequence_ITEM(arg, 0); if (unlikely(!arg0)) return NULL; + #endif + result = (*meth)(self, arg0); + #if !(CYTHON_ASSUME_SAFE_MACROS && !CYTHON_AVOID_BORROWED_REFS) + Py_DECREF(arg0); + #endif + return result; + } +#if CYTHON_COMPILING_IN_LIMITED_API + py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL); + if (!py_name) return NULL; + PyErr_Format(PyExc_TypeError, + "%.200S() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + py_name, size); + Py_DECREF(py_name); +#else + PyErr_Format(PyExc_TypeError, + "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + f->m_ml->ml_name, size); +#endif + return NULL; + } + break; + default: + PyErr_SetString(PyExc_SystemError, "Bad call flags for CyFunction"); + return NULL; + } +#if CYTHON_COMPILING_IN_LIMITED_API + py_name = __Pyx_CyFunction_get_name((__pyx_CyFunctionObject*)func, NULL); + if (!py_name) return NULL; + 
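/* Fallthrough error path: keywords were passed to a METH_VARARGS, METH_NOARGS or METH_O signature that cannot accept them. */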
PyErr_Format(PyExc_TypeError, "%.200S() takes no keyword arguments", + py_name); + Py_DECREF(py_name); +#else + PyErr_Format(PyExc_TypeError, "%.200s() takes no keyword arguments", + f->m_ml->ml_name); +#endif + return NULL; +} +static CYTHON_INLINE PyObject *__Pyx_CyFunction_Call(PyObject *func, PyObject *arg, PyObject *kw) { + PyObject *self, *result; +#if CYTHON_COMPILING_IN_LIMITED_API + self = PyCFunction_GetSelf(((__pyx_CyFunctionObject*)func)->func); + if (unlikely(!self) && PyErr_Occurred()) return NULL; +#else + self = ((PyCFunctionObject*)func)->m_self; +#endif + result = __Pyx_CyFunction_CallMethod(func, self, arg, kw); + return result; +} +static PyObject *__Pyx_CyFunction_CallAsMethod(PyObject *func, PyObject *args, PyObject *kw) { + PyObject *result; + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *) func; +#if CYTHON_METH_FASTCALL + __pyx_vectorcallfunc vc = __Pyx_CyFunction_func_vectorcall(cyfunc); + if (vc) { +#if CYTHON_ASSUME_SAFE_MACROS + return __Pyx_PyVectorcall_FastCallDict(func, vc, &PyTuple_GET_ITEM(args, 0), (size_t)PyTuple_GET_SIZE(args), kw); +#else + (void) &__Pyx_PyVectorcall_FastCallDict; + return PyVectorcall_Call(func, args, kw); +#endif + } +#endif + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + Py_ssize_t argc; + PyObject *new_args; + PyObject *self; +#if CYTHON_ASSUME_SAFE_MACROS + argc = PyTuple_GET_SIZE(args); +#else + argc = PyTuple_Size(args); + if (unlikely(argc < 0)) return NULL; +#endif + new_args = PyTuple_GetSlice(args, 1, argc); + if (unlikely(!new_args)) + return NULL; + self = PyTuple_GetItem(args, 0); + if (unlikely(!self)) { + Py_DECREF(new_args); +#if PY_MAJOR_VERSION > 2 + PyErr_Format(PyExc_TypeError, + "unbound method %.200S() needs an argument", + cyfunc->func_qualname); +#else + PyErr_SetString(PyExc_TypeError, + "unbound method needs an argument"); +#endif + return NULL; + } + result = __Pyx_CyFunction_CallMethod(func, self, new_args, kw); + Py_DECREF(new_args); + } else { + result = __Pyx_CyFunction_Call(func, args, kw); + } + return result; +} +#if CYTHON_METH_FASTCALL +static CYTHON_INLINE int __Pyx_CyFunction_Vectorcall_CheckArgs(__pyx_CyFunctionObject *cyfunc, Py_ssize_t nargs, PyObject *kwnames) +{ + int ret = 0; + if ((cyfunc->flags & __Pyx_CYFUNCTION_CCLASS) && !(cyfunc->flags & __Pyx_CYFUNCTION_STATICMETHOD)) { + if (unlikely(nargs < 1)) { + PyErr_Format(PyExc_TypeError, "%.200s() needs an argument", + ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); + return -1; + } + ret = 1; + } + if (unlikely(kwnames) && unlikely(PyTuple_GET_SIZE(kwnames))) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes no keyword arguments", ((PyCFunctionObject*)cyfunc)->m_ml->ml_name); + return -1; + } + return ret; +} +static PyObject * __Pyx_CyFunction_Vectorcall_NOARGS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + if (unlikely(nargs != 0)) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes no arguments (%" CYTHON_FORMAT_SSIZE_T "d given)", + 
def->ml_name, nargs); + return NULL; + } + return def->ml_meth(self, NULL); +} +static PyObject * __Pyx_CyFunction_Vectorcall_O(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, kwnames)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + if (unlikely(nargs != 1)) { + PyErr_Format(PyExc_TypeError, + "%.200s() takes exactly one argument (%" CYTHON_FORMAT_SSIZE_T "d given)", + def->ml_name, nargs); + return NULL; + } + return def->ml_meth(self, args[0]); +} +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + return ((_PyCFunctionFastWithKeywords)(void(*)(void))def->ml_meth)(self, args, nargs, kwnames); +} +static PyObject * __Pyx_CyFunction_Vectorcall_FASTCALL_KEYWORDS_METHOD(PyObject *func, PyObject *const *args, size_t nargsf, PyObject *kwnames) +{ + __pyx_CyFunctionObject *cyfunc = (__pyx_CyFunctionObject *)func; + PyMethodDef* def = ((PyCFunctionObject*)cyfunc)->m_ml; + PyTypeObject *cls = (PyTypeObject *) __Pyx_CyFunction_GetClassObj(cyfunc); +#if CYTHON_BACKPORT_VECTORCALL + Py_ssize_t nargs = (Py_ssize_t)nargsf; +#else + Py_ssize_t nargs = PyVectorcall_NARGS(nargsf); +#endif + PyObject *self; + switch (__Pyx_CyFunction_Vectorcall_CheckArgs(cyfunc, nargs, NULL)) { + case 1: + self = args[0]; + args += 1; + nargs -= 1; + break; + case 0: + self = ((PyCFunctionObject*)cyfunc)->m_self; + break; + default: + return NULL; + } + return ((__Pyx_PyCMethod)(void(*)(void))def->ml_meth)(self, cls, args, (size_t)nargs, kwnames); +} +#endif +#if CYTHON_USE_TYPE_SPECS +static PyType_Slot __pyx_CyFunctionType_slots[] = { + {Py_tp_dealloc, (void *)__Pyx_CyFunction_dealloc}, + {Py_tp_repr, (void *)__Pyx_CyFunction_repr}, + {Py_tp_call, (void *)__Pyx_CyFunction_CallAsMethod}, + {Py_tp_traverse, (void *)__Pyx_CyFunction_traverse}, + {Py_tp_clear, (void *)__Pyx_CyFunction_clear}, + {Py_tp_methods, (void *)__pyx_CyFunction_methods}, + {Py_tp_members, (void *)__pyx_CyFunction_members}, + {Py_tp_getset, (void *)__pyx_CyFunction_getsets}, + {Py_tp_descr_get, (void *)__Pyx_PyMethod_New}, + {0, 0}, +}; +static PyType_Spec __pyx_CyFunctionType_spec = { + __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", + sizeof(__pyx_CyFunctionObject), + 0, +#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR + Py_TPFLAGS_METHOD_DESCRIPTOR | +#endif +#if (defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL) + _Py_TPFLAGS_HAVE_VECTORCALL | +#endif + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, + __pyx_CyFunctionType_slots +}; +#else +static 
PyTypeObject __pyx_CyFunctionType_type = { + PyVarObject_HEAD_INIT(0, 0) + __PYX_TYPE_MODULE_PREFIX "cython_function_or_method", + sizeof(__pyx_CyFunctionObject), + 0, + (destructor) __Pyx_CyFunction_dealloc, +#if !CYTHON_METH_FASTCALL + 0, +#elif CYTHON_BACKPORT_VECTORCALL + (printfunc)offsetof(__pyx_CyFunctionObject, func_vectorcall), +#else + offsetof(PyCFunctionObject, vectorcall), +#endif + 0, + 0, +#if PY_MAJOR_VERSION < 3 + 0, +#else + 0, +#endif + (reprfunc) __Pyx_CyFunction_repr, + 0, + 0, + 0, + 0, + __Pyx_CyFunction_CallAsMethod, + 0, + 0, + 0, + 0, +#ifdef Py_TPFLAGS_METHOD_DESCRIPTOR + Py_TPFLAGS_METHOD_DESCRIPTOR | +#endif +#if defined(_Py_TPFLAGS_HAVE_VECTORCALL) && CYTHON_METH_FASTCALL + _Py_TPFLAGS_HAVE_VECTORCALL | +#endif + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_HAVE_GC | Py_TPFLAGS_BASETYPE, + 0, + (traverseproc) __Pyx_CyFunction_traverse, + (inquiry) __Pyx_CyFunction_clear, + 0, +#if PY_VERSION_HEX < 0x030500A0 + offsetof(__pyx_CyFunctionObject, func_weakreflist), +#else + offsetof(PyCFunctionObject, m_weakreflist), +#endif + 0, + 0, + __pyx_CyFunction_methods, + __pyx_CyFunction_members, + __pyx_CyFunction_getsets, + 0, + 0, + __Pyx_PyMethod_New, + 0, + offsetof(__pyx_CyFunctionObject, func_dict), + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, + 0, +#if PY_VERSION_HEX >= 0x030400a1 + 0, +#endif +#if PY_VERSION_HEX >= 0x030800b1 && (!CYTHON_COMPILING_IN_PYPY || PYPY_VERSION_NUM >= 0x07030800) + 0, +#endif +#if __PYX_NEED_TP_PRINT_SLOT + 0, +#endif +#if PY_VERSION_HEX >= 0x030C0000 + 0, +#endif +#if CYTHON_COMPILING_IN_PYPY && PY_VERSION_HEX >= 0x03090000 && PY_VERSION_HEX < 0x030a0000 + 0, +#endif +}; +#endif +static int __pyx_CyFunction_init(PyObject *module) { +#if CYTHON_USE_TYPE_SPECS + __pyx_CyFunctionType = __Pyx_FetchCommonTypeFromSpec(module, &__pyx_CyFunctionType_spec, NULL); +#else + CYTHON_UNUSED_VAR(module); + __pyx_CyFunctionType = __Pyx_FetchCommonType(&__pyx_CyFunctionType_type); +#endif + if (unlikely(__pyx_CyFunctionType == NULL)) { + return -1; + } + return 0; +} +static CYTHON_INLINE void *__Pyx_CyFunction_InitDefaults(PyObject *func, size_t size, int pyobjects) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults = PyObject_Malloc(size); + if (unlikely(!m->defaults)) + return PyErr_NoMemory(); + memset(m->defaults, 0, size); + m->defaults_pyobjects = pyobjects; + m->defaults_size = size; + return m->defaults; +} +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsTuple(PyObject *func, PyObject *tuple) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_tuple = tuple; + Py_INCREF(tuple); +} +static CYTHON_INLINE void __Pyx_CyFunction_SetDefaultsKwDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->defaults_kwdict = dict; + Py_INCREF(dict); +} +static CYTHON_INLINE void __Pyx_CyFunction_SetAnnotationsDict(PyObject *func, PyObject *dict) { + __pyx_CyFunctionObject *m = (__pyx_CyFunctionObject *) func; + m->func_annotations = dict; + Py_INCREF(dict); +} + +/* CythonFunction */ +static PyObject *__Pyx_CyFunction_New(PyMethodDef *ml, int flags, PyObject* qualname, + PyObject *closure, PyObject *module, PyObject* globals, PyObject* code) { + PyObject *op = __Pyx_CyFunction_Init( + PyObject_GC_New(__pyx_CyFunctionObject, __pyx_CyFunctionType), + ml, flags, qualname, closure, module, globals, code + ); + if (likely(op)) { + PyObject_GC_Track(op); + } + return op; +} + +/* CLineInTraceback */ +#ifndef CYTHON_CLINE_IN_TRACEBACK +static int 
__Pyx_CLineForTraceback(PyThreadState *tstate, int c_line) { + PyObject *use_cline; + PyObject *ptype, *pvalue, *ptraceback; +#if CYTHON_COMPILING_IN_CPYTHON + PyObject **cython_runtime_dict; +#endif + CYTHON_MAYBE_UNUSED_VAR(tstate); + if (unlikely(!__pyx_cython_runtime)) { + return c_line; + } + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); +#if CYTHON_COMPILING_IN_CPYTHON + cython_runtime_dict = _PyObject_GetDictPtr(__pyx_cython_runtime); + if (likely(cython_runtime_dict)) { + __PYX_PY_DICT_LOOKUP_IF_MODIFIED( + use_cline, *cython_runtime_dict, + __Pyx_PyDict_GetItemStr(*cython_runtime_dict, __pyx_n_s_cline_in_traceback)) + } else +#endif + { + PyObject *use_cline_obj = __Pyx_PyObject_GetAttrStrNoError(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback); + if (use_cline_obj) { + use_cline = PyObject_Not(use_cline_obj) ? Py_False : Py_True; + Py_DECREF(use_cline_obj); + } else { + PyErr_Clear(); + use_cline = NULL; + } + } + if (!use_cline) { + c_line = 0; + (void) PyObject_SetAttr(__pyx_cython_runtime, __pyx_n_s_cline_in_traceback, Py_False); + } + else if (use_cline == Py_False || (use_cline != Py_True && PyObject_Not(use_cline) != 0)) { + c_line = 0; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + return c_line; +} +#endif + +/* CodeObjectCache */ +#if !CYTHON_COMPILING_IN_LIMITED_API +static int __pyx_bisect_code_objects(__Pyx_CodeObjectCacheEntry* entries, int count, int code_line) { + int start = 0, mid = 0, end = count - 1; + if (end >= 0 && code_line > entries[end].code_line) { + return count; + } + while (start < end) { + mid = start + (end - start) / 2; + if (code_line < entries[mid].code_line) { + end = mid; + } else if (code_line > entries[mid].code_line) { + start = mid + 1; + } else { + return mid; + } + } + if (code_line <= entries[mid].code_line) { + return mid; + } else { + return mid + 1; + } +} +static PyCodeObject *__pyx_find_code_object(int code_line) { + PyCodeObject* code_object; + int pos; + if (unlikely(!code_line) || unlikely(!__pyx_code_cache.entries)) { + return NULL; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if (unlikely(pos >= __pyx_code_cache.count) || unlikely(__pyx_code_cache.entries[pos].code_line != code_line)) { + return NULL; + } + code_object = __pyx_code_cache.entries[pos].code_object; + Py_INCREF(code_object); + return code_object; +} +static void __pyx_insert_code_object(int code_line, PyCodeObject* code_object) { + int pos, i; + __Pyx_CodeObjectCacheEntry* entries = __pyx_code_cache.entries; + if (unlikely(!code_line)) { + return; + } + if (unlikely(!entries)) { + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Malloc(64*sizeof(__Pyx_CodeObjectCacheEntry)); + if (likely(entries)) { + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = 64; + __pyx_code_cache.count = 1; + entries[0].code_line = code_line; + entries[0].code_object = code_object; + Py_INCREF(code_object); + } + return; + } + pos = __pyx_bisect_code_objects(__pyx_code_cache.entries, __pyx_code_cache.count, code_line); + if ((pos < __pyx_code_cache.count) && unlikely(__pyx_code_cache.entries[pos].code_line == code_line)) { + PyCodeObject* tmp = entries[pos].code_object; + entries[pos].code_object = code_object; + Py_DECREF(tmp); + return; + } + if (__pyx_code_cache.count == __pyx_code_cache.max_count) { + int new_max = __pyx_code_cache.max_count + 64; + entries = (__Pyx_CodeObjectCacheEntry*)PyMem_Realloc( + __pyx_code_cache.entries, ((size_t)new_max) * 
sizeof(__Pyx_CodeObjectCacheEntry)); + if (unlikely(!entries)) { + return; + } + __pyx_code_cache.entries = entries; + __pyx_code_cache.max_count = new_max; + } + for (i=__pyx_code_cache.count; i>pos; i--) { + entries[i] = entries[i-1]; + } + entries[pos].code_line = code_line; + entries[pos].code_object = code_object; + __pyx_code_cache.count++; + Py_INCREF(code_object); +} +#endif + +/* AddTraceback */ +#include "compile.h" +#include "frameobject.h" +#include "traceback.h" +#if PY_VERSION_HEX >= 0x030b00a6 && !CYTHON_COMPILING_IN_LIMITED_API + #ifndef Py_BUILD_CORE + #define Py_BUILD_CORE 1 + #endif + #include "internal/pycore_frame.h" +#endif +#if CYTHON_COMPILING_IN_LIMITED_API +static PyObject *__Pyx_PyCode_Replace_For_AddTraceback(PyObject *code, PyObject *scratch_dict, + PyObject *firstlineno, PyObject *name) { + PyObject *replace = NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "co_firstlineno", firstlineno))) return NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "co_name", name))) return NULL; + replace = PyObject_GetAttrString(code, "replace"); + if (likely(replace)) { + PyObject *result; + result = PyObject_Call(replace, __pyx_empty_tuple, scratch_dict); + Py_DECREF(replace); + return result; + } + #if __PYX_LIMITED_VERSION_HEX < 0x030780000 + PyErr_Clear(); + { + PyObject *compiled = NULL, *result = NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "code", code))) return NULL; + if (unlikely(PyDict_SetItemString(scratch_dict, "type", (PyObject*)(&PyType_Type)))) return NULL; + compiled = Py_CompileString( + "out = type(code)(\n" + " code.co_argcount, code.co_kwonlyargcount, code.co_nlocals, code.co_stacksize,\n" + " code.co_flags, code.co_code, code.co_consts, code.co_names,\n" + " code.co_varnames, code.co_filename, co_name, co_firstlineno,\n" + " code.co_lnotab)\n", "<dummy>", Py_file_input); + if (!compiled) return NULL; + result = PyEval_EvalCode(compiled, scratch_dict, scratch_dict); + Py_DECREF(compiled); + if (!result) PyErr_Print(); + Py_DECREF(result); + result = PyDict_GetItemString(scratch_dict, "out"); + if (result) Py_INCREF(result); + return result; + } + #endif +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyObject *code_object = NULL, *py_py_line = NULL, *py_funcname = NULL, *dict = NULL; + PyObject *replace = NULL, *getframe = NULL, *frame = NULL; + PyObject *exc_type, *exc_value, *exc_traceback; + int success = 0; + if (c_line) { + (void) __pyx_cfilenm; + (void) __Pyx_CLineForTraceback(__Pyx_PyThreadState_Current, c_line); + } + PyErr_Fetch(&exc_type, &exc_value, &exc_traceback); + code_object = Py_CompileString("_getframe()", filename, Py_eval_input); + if (unlikely(!code_object)) goto bad; + py_py_line = PyLong_FromLong(py_line); + if (unlikely(!py_py_line)) goto bad; + py_funcname = PyUnicode_FromString(funcname); + if (unlikely(!py_funcname)) goto bad; + dict = PyDict_New(); + if (unlikely(!dict)) goto bad; + { + PyObject *old_code_object = code_object; + code_object = __Pyx_PyCode_Replace_For_AddTraceback(code_object, dict, py_py_line, py_funcname); + Py_DECREF(old_code_object); + } + if (unlikely(!code_object)) goto bad; + getframe = PySys_GetObject("_getframe"); + if (unlikely(!getframe)) goto bad; + if (unlikely(PyDict_SetItemString(dict, "_getframe", getframe))) goto bad; + frame = PyEval_EvalCode(code_object, dict, dict); + if (unlikely(!frame) || frame == Py_None) goto bad; + success = 1; + bad: + PyErr_Restore(exc_type, exc_value, exc_traceback); + 
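/* The original in-flight exception was restored above; release the temporaries and only record the traceback entry if the frame was successfully built. */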
Py_XDECREF(code_object); + Py_XDECREF(py_py_line); + Py_XDECREF(py_funcname); + Py_XDECREF(dict); + Py_XDECREF(replace); + if (success) { + PyTraceBack_Here( + (struct _frame*)frame); + } + Py_XDECREF(frame); +} +#else +static PyCodeObject* __Pyx_CreateCodeObjectForTraceback( + const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = NULL; + PyObject *py_funcname = NULL; + #if PY_MAJOR_VERSION < 3 + PyObject *py_srcfile = NULL; + py_srcfile = PyString_FromString(filename); + if (!py_srcfile) goto bad; + #endif + if (c_line) { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + if (!py_funcname) goto bad; + #else + py_funcname = PyUnicode_FromFormat( "%s (%s:%d)", funcname, __pyx_cfilenm, c_line); + if (!py_funcname) goto bad; + funcname = PyUnicode_AsUTF8(py_funcname); + if (!funcname) goto bad; + #endif + } + else { + #if PY_MAJOR_VERSION < 3 + py_funcname = PyString_FromString(funcname); + if (!py_funcname) goto bad; + #endif + } + #if PY_MAJOR_VERSION < 3 + py_code = __Pyx_PyCode_New( + 0, + 0, + 0, + 0, + 0, + 0, + __pyx_empty_bytes, /*PyObject *code,*/ + __pyx_empty_tuple, /*PyObject *consts,*/ + __pyx_empty_tuple, /*PyObject *names,*/ + __pyx_empty_tuple, /*PyObject *varnames,*/ + __pyx_empty_tuple, /*PyObject *freevars,*/ + __pyx_empty_tuple, /*PyObject *cellvars,*/ + py_srcfile, /*PyObject *filename,*/ + py_funcname, /*PyObject *name,*/ + py_line, + __pyx_empty_bytes /*PyObject *lnotab*/ + ); + Py_DECREF(py_srcfile); + #else + py_code = PyCode_NewEmpty(filename, funcname, py_line); + #endif + Py_XDECREF(py_funcname); // XDECREF since it's only set on Py3 if cline + return py_code; +bad: + Py_XDECREF(py_funcname); + #if PY_MAJOR_VERSION < 3 + Py_XDECREF(py_srcfile); + #endif + return NULL; +} +static void __Pyx_AddTraceback(const char *funcname, int c_line, + int py_line, const char *filename) { + PyCodeObject *py_code = 0; + PyFrameObject *py_frame = 0; + PyThreadState *tstate = __Pyx_PyThreadState_Current; + PyObject *ptype, *pvalue, *ptraceback; + if (c_line) { + c_line = __Pyx_CLineForTraceback(tstate, c_line); + } + py_code = __pyx_find_code_object(c_line ? -c_line : py_line); + if (!py_code) { + __Pyx_ErrFetchInState(tstate, &ptype, &pvalue, &ptraceback); + py_code = __Pyx_CreateCodeObjectForTraceback( + funcname, c_line, py_line, filename); + if (!py_code) { + /* If the code object creation fails, then we should clear the + fetched exception references and propagate the new exception */ + Py_XDECREF(ptype); + Py_XDECREF(pvalue); + Py_XDECREF(ptraceback); + goto bad; + } + __Pyx_ErrRestoreInState(tstate, ptype, pvalue, ptraceback); + __pyx_insert_code_object(c_line ? 
-c_line : py_line, py_code); + } + py_frame = PyFrame_New( + tstate, /*PyThreadState *tstate,*/ + py_code, /*PyCodeObject *code,*/ + __pyx_d, /*PyObject *globals,*/ + 0 /*PyObject *locals*/ + ); + if (!py_frame) goto bad; + __Pyx_PyFrame_SetLineNumber(py_frame, py_line); + PyTraceBack_Here(py_frame); +bad: + Py_XDECREF(py_code); + Py_XDECREF(py_frame); +} +#endif + +#if PY_MAJOR_VERSION < 3 +static int __Pyx_GetBuffer(PyObject *obj, Py_buffer *view, int flags) { + __Pyx_TypeName obj_type_name; + if (PyObject_CheckBuffer(obj)) return PyObject_GetBuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_array_type)) return __pyx_array_getbuffer(obj, view, flags); + if (__Pyx_TypeCheck(obj, __pyx_memoryview_type)) return __pyx_memoryview_getbuffer(obj, view, flags); + obj_type_name = __Pyx_PyType_GetName(Py_TYPE(obj)); + PyErr_Format(PyExc_TypeError, + "'" __Pyx_FMT_TYPENAME "' does not have the buffer interface", + obj_type_name); + __Pyx_DECREF_TypeName(obj_type_name); + return -1; +} +static void __Pyx_ReleaseBuffer(Py_buffer *view) { + PyObject *obj = view->obj; + if (!obj) return; + if (PyObject_CheckBuffer(obj)) { + PyBuffer_Release(view); + return; + } + if ((0)) {} + view->obj = NULL; + Py_DECREF(obj); +} +#endif + + +/* MemviewSliceIsContig */ +static int +__pyx_memviewslice_is_contig(const __Pyx_memviewslice mvs, char order, int ndim) +{ + int i, index, step, start; + Py_ssize_t itemsize = mvs.memview->view.itemsize; + if (order == 'F') { + step = 1; + start = 0; + } else { + step = -1; + start = ndim - 1; + } + for (i = 0; i < ndim; i++) { + index = start + step * i; + if (mvs.suboffsets[index] >= 0 || mvs.strides[index] != itemsize) + return 0; + itemsize *= mvs.shape[index]; + } + return 1; +} + +/* OverlappingSlices */ +static void +__pyx_get_array_memory_extents(__Pyx_memviewslice *slice, + void **out_start, void **out_end, + int ndim, size_t itemsize) +{ + char *start, *end; + int i; + start = end = slice->data; + for (i = 0; i < ndim; i++) { + Py_ssize_t stride = slice->strides[i]; + Py_ssize_t extent = slice->shape[i]; + if (extent == 0) { + *out_start = *out_end = start; + return; + } else { + if (stride > 0) + end += stride * (extent - 1); + else + start += stride * (extent - 1); + } + } + *out_start = start; + *out_end = end + itemsize; +} +static int +__pyx_slices_overlap(__Pyx_memviewslice *slice1, + __Pyx_memviewslice *slice2, + int ndim, size_t itemsize) +{ + void *start1, *end1, *start2, *end2; + __pyx_get_array_memory_extents(slice1, &start1, &end1, ndim, itemsize); + __pyx_get_array_memory_extents(slice2, &start2, &end2, ndim, itemsize); + return (start1 < end2) && (start2 < end1); +} + +/* IsLittleEndian */ +static CYTHON_INLINE int __Pyx_Is_Little_Endian(void) +{ + union { + uint32_t u32; + uint8_t u8[4]; + } S; + S.u32 = 0x01020304; + return S.u8[0] == 4; +} + +/* BufferFormatCheck */ +static void __Pyx_BufFmt_Init(__Pyx_BufFmt_Context* ctx, + __Pyx_BufFmt_StackElem* stack, + __Pyx_TypeInfo* type) { + stack[0].field = &ctx->root; + stack[0].parent_offset = 0; + ctx->root.type = type; + ctx->root.name = "buffer dtype"; + ctx->root.offset = 0; + ctx->head = stack; + ctx->head->field = &ctx->root; + ctx->fmt_offset = 0; + ctx->head->parent_offset = 0; + ctx->new_packmode = '@'; + ctx->enc_packmode = '@'; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->is_complex = 0; + ctx->is_valid_array = 0; + ctx->struct_alignment = 0; + while (type->typegroup == 'S') { + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = 0; + 
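/* Descend through nested struct (typegroup 'S') wrappers so format parsing starts at the innermost field type. */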
type = type->fields->type; + } +} +static int __Pyx_BufFmt_ParseNumber(const char** ts) { + int count; + const char* t = *ts; + if (*t < '0' || *t > '9') { + return -1; + } else { + count = *t++ - '0'; + while (*t >= '0' && *t <= '9') { + count *= 10; + count += *t++ - '0'; + } + } + *ts = t; + return count; +} +static int __Pyx_BufFmt_ExpectNumber(const char **ts) { + int number = __Pyx_BufFmt_ParseNumber(ts); + if (number == -1) + PyErr_Format(PyExc_ValueError,\ + "Does not understand character buffer dtype format string ('%c')", **ts); + return number; +} +static void __Pyx_BufFmt_RaiseUnexpectedChar(char ch) { + PyErr_Format(PyExc_ValueError, + "Unexpected format string character: '%c'", ch); +} +static const char* __Pyx_BufFmt_DescribeTypeChar(char ch, int is_complex) { + switch (ch) { + case '?': return "'bool'"; + case 'c': return "'char'"; + case 'b': return "'signed char'"; + case 'B': return "'unsigned char'"; + case 'h': return "'short'"; + case 'H': return "'unsigned short'"; + case 'i': return "'int'"; + case 'I': return "'unsigned int'"; + case 'l': return "'long'"; + case 'L': return "'unsigned long'"; + case 'q': return "'long long'"; + case 'Q': return "'unsigned long long'"; + case 'f': return (is_complex ? "'complex float'" : "'float'"); + case 'd': return (is_complex ? "'complex double'" : "'double'"); + case 'g': return (is_complex ? "'complex long double'" : "'long double'"); + case 'T': return "a struct"; + case 'O': return "Python object"; + case 'P': return "a pointer"; + case 's': case 'p': return "a string"; + case 0: return "end"; + default: return "unparsable format string"; + } +} +static size_t __Pyx_BufFmt_TypeCharToStandardSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return 2; + case 'i': case 'I': case 'l': case 'L': return 4; + case 'q': case 'Q': return 8; + case 'f': return (is_complex ? 8 : 4); + case 'd': return (is_complex ? 16 : 8); + case 'g': { + PyErr_SetString(PyExc_ValueError, "Python does not define a standard format string size for long double ('g').."); + return 0; + } + case 'O': case 'P': return sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static size_t __Pyx_BufFmt_TypeCharToNativeSize(char ch, int is_complex) { + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(short); + case 'i': case 'I': return sizeof(int); + case 'l': case 'L': return sizeof(long); + #ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(PY_LONG_LONG); + #endif + case 'f': return sizeof(float) * (is_complex ? 2 : 1); + case 'd': return sizeof(double) * (is_complex ? 2 : 1); + case 'g': return sizeof(long double) * (is_complex ? 
2 : 1); + case 'O': case 'P': return sizeof(void*); + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +typedef struct { char c; short x; } __Pyx_st_short; +typedef struct { char c; int x; } __Pyx_st_int; +typedef struct { char c; long x; } __Pyx_st_long; +typedef struct { char c; float x; } __Pyx_st_float; +typedef struct { char c; double x; } __Pyx_st_double; +typedef struct { char c; long double x; } __Pyx_st_longdouble; +typedef struct { char c; void *x; } __Pyx_st_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { char c; PY_LONG_LONG x; } __Pyx_st_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToAlignment(char ch, int is_complex) { + CYTHON_UNUSED_VAR(is_complex); + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_st_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_st_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_st_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_st_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_st_float) - sizeof(float); + case 'd': return sizeof(__Pyx_st_double) - sizeof(double); + case 'g': return sizeof(__Pyx_st_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_st_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +/* These are for computing the padding at the end of the struct to align + on the first member of the struct. This will probably be the same as above, + but we don't have any guarantees. + */ +typedef struct { short x; char c; } __Pyx_pad_short; +typedef struct { int x; char c; } __Pyx_pad_int; +typedef struct { long x; char c; } __Pyx_pad_long; +typedef struct { float x; char c; } __Pyx_pad_float; +typedef struct { double x; char c; } __Pyx_pad_double; +typedef struct { long double x; char c; } __Pyx_pad_longdouble; +typedef struct { void *x; char c; } __Pyx_pad_void_p; +#ifdef HAVE_LONG_LONG +typedef struct { PY_LONG_LONG x; char c; } __Pyx_pad_longlong; +#endif +static size_t __Pyx_BufFmt_TypeCharToPadding(char ch, int is_complex) { + CYTHON_UNUSED_VAR(is_complex); + switch (ch) { + case '?': case 'c': case 'b': case 'B': case 's': case 'p': return 1; + case 'h': case 'H': return sizeof(__Pyx_pad_short) - sizeof(short); + case 'i': case 'I': return sizeof(__Pyx_pad_int) - sizeof(int); + case 'l': case 'L': return sizeof(__Pyx_pad_long) - sizeof(long); +#ifdef HAVE_LONG_LONG + case 'q': case 'Q': return sizeof(__Pyx_pad_longlong) - sizeof(PY_LONG_LONG); +#endif + case 'f': return sizeof(__Pyx_pad_float) - sizeof(float); + case 'd': return sizeof(__Pyx_pad_double) - sizeof(double); + case 'g': return sizeof(__Pyx_pad_longdouble) - sizeof(long double); + case 'P': case 'O': return sizeof(__Pyx_pad_void_p) - sizeof(void*); + default: + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } +} +static char __Pyx_BufFmt_TypeCharToGroup(char ch, int is_complex) { + switch (ch) { + case 'c': + return 'H'; + case 'b': case 'h': case 'i': + case 'l': case 'q': case 's': case 'p': + return 'I'; + case '?': case 'B': case 'H': case 'I': case 'L': case 'Q': + return 'U'; + case 'f': case 'd': case 'g': + return (is_complex ? 
'C' : 'R'); + case 'O': + return 'O'; + case 'P': + return 'P'; + default: { + __Pyx_BufFmt_RaiseUnexpectedChar(ch); + return 0; + } + } +} +static void __Pyx_BufFmt_RaiseExpected(__Pyx_BufFmt_Context* ctx) { + if (ctx->head == NULL || ctx->head->field == &ctx->root) { + const char* expected; + const char* quote; + if (ctx->head == NULL) { + expected = "end"; + quote = ""; + } else { + expected = ctx->head->field->type->name; + quote = "'"; + } + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected %s%s%s but got %s", + quote, expected, quote, + __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex)); + } else { + __Pyx_StructField* field = ctx->head->field; + __Pyx_StructField* parent = (ctx->head - 1)->field; + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch, expected '%s' but got %s in '%s.%s'", + field->type->name, __Pyx_BufFmt_DescribeTypeChar(ctx->enc_type, ctx->is_complex), + parent->type->name, field->name); + } +} +static int __Pyx_BufFmt_ProcessTypeChunk(__Pyx_BufFmt_Context* ctx) { + char group; + size_t size, offset, arraysize = 1; + if (ctx->enc_type == 0) return 0; + if (ctx->head->field->type->arraysize[0]) { + int i, ndim = 0; + if (ctx->enc_type == 's' || ctx->enc_type == 'p') { + ctx->is_valid_array = ctx->head->field->type->ndim == 1; + ndim = 1; + if (ctx->enc_count != ctx->head->field->type->arraysize[0]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %zu", + ctx->head->field->type->arraysize[0], ctx->enc_count); + return -1; + } + } + if (!ctx->is_valid_array) { + PyErr_Format(PyExc_ValueError, "Expected %d dimensions, got %d", + ctx->head->field->type->ndim, ndim); + return -1; + } + for (i = 0; i < ctx->head->field->type->ndim; i++) { + arraysize *= ctx->head->field->type->arraysize[i]; + } + ctx->is_valid_array = 0; + ctx->enc_count = 1; + } + group = __Pyx_BufFmt_TypeCharToGroup(ctx->enc_type, ctx->is_complex); + do { + __Pyx_StructField* field = ctx->head->field; + __Pyx_TypeInfo* type = field->type; + if (ctx->enc_packmode == '@' || ctx->enc_packmode == '^') { + size = __Pyx_BufFmt_TypeCharToNativeSize(ctx->enc_type, ctx->is_complex); + } else { + size = __Pyx_BufFmt_TypeCharToStandardSize(ctx->enc_type, ctx->is_complex); + } + if (ctx->enc_packmode == '@') { + size_t align_at = __Pyx_BufFmt_TypeCharToAlignment(ctx->enc_type, ctx->is_complex); + size_t align_mod_offset; + if (align_at == 0) return -1; + align_mod_offset = ctx->fmt_offset % align_at; + if (align_mod_offset > 0) ctx->fmt_offset += align_at - align_mod_offset; + if (ctx->struct_alignment == 0) + ctx->struct_alignment = __Pyx_BufFmt_TypeCharToPadding(ctx->enc_type, + ctx->is_complex); + } + if (type->size != size || type->typegroup != group) { + if (type->typegroup == 'C' && type->fields != NULL) { + size_t parent_offset = ctx->head->parent_offset + field->offset; + ++ctx->head; + ctx->head->field = type->fields; + ctx->head->parent_offset = parent_offset; + continue; + } + if ((type->typegroup == 'H' || group == 'H') && type->size == size) { + } else { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + } + offset = ctx->head->parent_offset + field->offset; + if (ctx->fmt_offset != offset) { + PyErr_Format(PyExc_ValueError, + "Buffer dtype mismatch; next field is at offset %" CYTHON_FORMAT_SSIZE_T "d but %" CYTHON_FORMAT_SSIZE_T "d expected", + (Py_ssize_t)ctx->fmt_offset, (Py_ssize_t)offset); + return -1; + } + ctx->fmt_offset += size; + if (arraysize) + ctx->fmt_offset += (arraysize - 1) * size; + --ctx->enc_count; + while (1) { + if 
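+          /* unwind: advance to the next field, popping out of nested structs;
+             reaching the root means the whole dtype has been consumed */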
(field == &ctx->root) { + ctx->head = NULL; + if (ctx->enc_count != 0) { + __Pyx_BufFmt_RaiseExpected(ctx); + return -1; + } + break; + } + ctx->head->field = ++field; + if (field->type == NULL) { + --ctx->head; + field = ctx->head->field; + continue; + } else if (field->type->typegroup == 'S') { + size_t parent_offset = ctx->head->parent_offset + field->offset; + if (field->type->fields->type == NULL) continue; + field = field->type->fields; + ++ctx->head; + ctx->head->field = field; + ctx->head->parent_offset = parent_offset; + break; + } else { + break; + } + } + } while (ctx->enc_count); + ctx->enc_type = 0; + ctx->is_complex = 0; + return 0; +} +static int +__pyx_buffmt_parse_array(__Pyx_BufFmt_Context* ctx, const char** tsp) +{ + const char *ts = *tsp; + int i = 0, number, ndim; + ++ts; + if (ctx->new_count != 1) { + PyErr_SetString(PyExc_ValueError, + "Cannot handle repeated arrays in format string"); + return -1; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return -1; + ndim = ctx->head->field->type->ndim; + while (*ts && *ts != ')') { + switch (*ts) { + case ' ': case '\f': case '\r': case '\n': case '\t': case '\v': continue; + default: break; + } + number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return -1; + if (i < ndim && (size_t) number != ctx->head->field->type->arraysize[i]) { + PyErr_Format(PyExc_ValueError, + "Expected a dimension of size %zu, got %d", + ctx->head->field->type->arraysize[i], number); + return -1; + } + if (*ts != ',' && *ts != ')') { + PyErr_Format(PyExc_ValueError, + "Expected a comma in format string, got '%c'", *ts); + return -1; + } + if (*ts == ',') ts++; + i++; + } + if (i != ndim) { + PyErr_Format(PyExc_ValueError, "Expected %d dimension(s), got %d", + ctx->head->field->type->ndim, i); + return -1; + } + if (!*ts) { + PyErr_SetString(PyExc_ValueError, + "Unexpected end of format string, expected ')'"); + return -1; + } + ctx->is_valid_array = 1; + ctx->new_count = 1; + *tsp = ++ts; + return 0; +} +static const char* __Pyx_BufFmt_CheckString(__Pyx_BufFmt_Context* ctx, const char* ts) { + int got_Z = 0; + while (1) { + switch(*ts) { + case 0: + if (ctx->enc_type != 0 && ctx->head == NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + if (ctx->head != NULL) { + __Pyx_BufFmt_RaiseExpected(ctx); + return NULL; + } + return ts; + case ' ': + case '\r': + case '\n': + ++ts; + break; + case '<': + if (!__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Little-endian buffer not supported on big-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '>': + case '!': + if (__Pyx_Is_Little_Endian()) { + PyErr_SetString(PyExc_ValueError, "Big-endian buffer not supported on little-endian compiler"); + return NULL; + } + ctx->new_packmode = '='; + ++ts; + break; + case '=': + case '@': + case '^': + ctx->new_packmode = *ts++; + break; + case 'T': + { + const char* ts_after_sub; + size_t i, struct_count = ctx->new_count; + size_t struct_alignment = ctx->struct_alignment; + ctx->new_count = 1; + ++ts; + if (*ts != '{') { + PyErr_SetString(PyExc_ValueError, "Buffer acquisition: Expected '{' after 'T'"); + return NULL; + } + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + ctx->enc_count = 0; + ctx->struct_alignment = 0; + ++ts; + ts_after_sub = ts; + for (i = 0; i != struct_count; ++i) { + ts_after_sub = __Pyx_BufFmt_CheckString(ctx, ts); + if (!ts_after_sub) return NULL; + } + ts = 
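+          /* skip past the '{...}' substruct, which was parsed struct_count times */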
ts_after_sub; + if (struct_alignment) ctx->struct_alignment = struct_alignment; + } + break; + case '}': + { + size_t alignment = ctx->struct_alignment; + ++ts; + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_type = 0; + if (alignment && ctx->fmt_offset % alignment) { + ctx->fmt_offset += alignment - (ctx->fmt_offset % alignment); + } + } + return ts; + case 'x': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->fmt_offset += ctx->new_count; + ctx->new_count = 1; + ctx->enc_count = 0; + ctx->enc_type = 0; + ctx->enc_packmode = ctx->new_packmode; + ++ts; + break; + case 'Z': + got_Z = 1; + ++ts; + if (*ts != 'f' && *ts != 'd' && *ts != 'g') { + __Pyx_BufFmt_RaiseUnexpectedChar('Z'); + return NULL; + } + CYTHON_FALLTHROUGH; + case '?': case 'c': case 'b': case 'B': case 'h': case 'H': case 'i': case 'I': + case 'l': case 'L': case 'q': case 'Q': + case 'f': case 'd': case 'g': + case 'O': case 'p': + if ((ctx->enc_type == *ts) && (got_Z == ctx->is_complex) && + (ctx->enc_packmode == ctx->new_packmode) && (!ctx->is_valid_array)) { + ctx->enc_count += ctx->new_count; + ctx->new_count = 1; + got_Z = 0; + ++ts; + break; + } + CYTHON_FALLTHROUGH; + case 's': + if (__Pyx_BufFmt_ProcessTypeChunk(ctx) == -1) return NULL; + ctx->enc_count = ctx->new_count; + ctx->enc_packmode = ctx->new_packmode; + ctx->enc_type = *ts; + ctx->is_complex = got_Z; + ++ts; + ctx->new_count = 1; + got_Z = 0; + break; + case ':': + ++ts; + while(*ts != ':') ++ts; + ++ts; + break; + case '(': + if (__pyx_buffmt_parse_array(ctx, &ts) < 0) return NULL; + break; + default: + { + int number = __Pyx_BufFmt_ExpectNumber(&ts); + if (number == -1) return NULL; + ctx->new_count = (size_t)number; + } + } + } +} + +/* TypeInfoCompare */ + static int +__pyx_typeinfo_cmp(__Pyx_TypeInfo *a, __Pyx_TypeInfo *b) +{ + int i; + if (!a || !b) + return 0; + if (a == b) + return 1; + if (a->size != b->size || a->typegroup != b->typegroup || + a->is_unsigned != b->is_unsigned || a->ndim != b->ndim) { + if (a->typegroup == 'H' || b->typegroup == 'H') { + return a->size == b->size; + } else { + return 0; + } + } + if (a->ndim) { + for (i = 0; i < a->ndim; i++) + if (a->arraysize[i] != b->arraysize[i]) + return 0; + } + if (a->typegroup == 'S') { + if (a->flags != b->flags) + return 0; + if (a->fields || b->fields) { + if (!(a->fields && b->fields)) + return 0; + for (i = 0; a->fields[i].type && b->fields[i].type; i++) { + __Pyx_StructField *field_a = a->fields + i; + __Pyx_StructField *field_b = b->fields + i; + if (field_a->offset != field_b->offset || + !__pyx_typeinfo_cmp(field_a->type, field_b->type)) + return 0; + } + return !a->fields[i].type && !b->fields[i].type; + } + } + return 1; +} + +/* MemviewSliceValidateAndInit */ + static int +__pyx_check_strides(Py_buffer *buf, int dim, int ndim, int spec) +{ + if (buf->shape[dim] <= 1) + return 1; + if (buf->strides) { + if (spec & __Pyx_MEMVIEW_CONTIG) { + if (spec & (__Pyx_MEMVIEW_PTR|__Pyx_MEMVIEW_FULL)) { + if (unlikely(buf->strides[dim] != sizeof(void *))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly contiguous " + "in dimension %d.", dim); + goto fail; + } + } else if (unlikely(buf->strides[dim] != buf->itemsize)) { + PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_FOLLOW) { + Py_ssize_t stride = buf->strides[dim]; + if (stride < 0) + stride = -stride; + if (unlikely(stride < buf->itemsize)) { + 
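+                /* a 'follow' dimension may use any stride ordering, but an absolute
+                   stride smaller than the itemsize would make elements overlap */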
PyErr_SetString(PyExc_ValueError, + "Buffer and memoryview are not contiguous " + "in the same dimension."); + goto fail; + } + } + } else { + if (unlikely(spec & __Pyx_MEMVIEW_CONTIG && dim != ndim - 1)) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not contiguous in " + "dimension %d", dim); + goto fail; + } else if (unlikely(spec & (__Pyx_MEMVIEW_PTR))) { + PyErr_Format(PyExc_ValueError, + "C-contiguous buffer is not indirect in " + "dimension %d", dim); + goto fail; + } else if (unlikely(buf->suboffsets)) { + PyErr_SetString(PyExc_ValueError, + "Buffer exposes suboffsets but no strides"); + goto fail; + } + } + return 1; +fail: + return 0; +} +static int +__pyx_check_suboffsets(Py_buffer *buf, int dim, int ndim, int spec) +{ + CYTHON_UNUSED_VAR(ndim); + if (spec & __Pyx_MEMVIEW_DIRECT) { + if (unlikely(buf->suboffsets && buf->suboffsets[dim] >= 0)) { + PyErr_Format(PyExc_ValueError, + "Buffer not compatible with direct access " + "in dimension %d.", dim); + goto fail; + } + } + if (spec & __Pyx_MEMVIEW_PTR) { + if (unlikely(!buf->suboffsets || (buf->suboffsets[dim] < 0))) { + PyErr_Format(PyExc_ValueError, + "Buffer is not indirectly accessible " + "in dimension %d.", dim); + goto fail; + } + } + return 1; +fail: + return 0; +} +static int +__pyx_verify_contig(Py_buffer *buf, int ndim, int c_or_f_flag) +{ + int i; + if (c_or_f_flag & __Pyx_IS_F_CONTIG) { + Py_ssize_t stride = 1; + for (i = 0; i < ndim; i++) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not fortran contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } else if (c_or_f_flag & __Pyx_IS_C_CONTIG) { + Py_ssize_t stride = 1; + for (i = ndim - 1; i >- 1; i--) { + if (unlikely(stride * buf->itemsize != buf->strides[i] && buf->shape[i] > 1)) { + PyErr_SetString(PyExc_ValueError, + "Buffer not C contiguous."); + goto fail; + } + stride = stride * buf->shape[i]; + } + } + return 1; +fail: + return 0; +} +static int __Pyx_ValidateAndInit_memviewslice( + int *axes_specs, + int c_or_f_flag, + int buf_flags, + int ndim, + __Pyx_TypeInfo *dtype, + __Pyx_BufFmt_StackElem stack[], + __Pyx_memviewslice *memviewslice, + PyObject *original_obj) +{ + struct __pyx_memoryview_obj *memview, *new_memview; + __Pyx_RefNannyDeclarations + Py_buffer *buf; + int i, spec = 0, retval = -1; + __Pyx_BufFmt_Context ctx; + int from_memoryview = __pyx_memoryview_check(original_obj); + __Pyx_RefNannySetupContext("ValidateAndInit_memviewslice", 0); + if (from_memoryview && __pyx_typeinfo_cmp(dtype, ((struct __pyx_memoryview_obj *) + original_obj)->typeinfo)) { + memview = (struct __pyx_memoryview_obj *) original_obj; + new_memview = NULL; + } else { + memview = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + original_obj, buf_flags, 0, dtype); + new_memview = memview; + if (unlikely(!memview)) + goto fail; + } + buf = &memview->view; + if (unlikely(buf->ndim != ndim)) { + PyErr_Format(PyExc_ValueError, + "Buffer has wrong number of dimensions (expected %d, got %d)", + ndim, buf->ndim); + goto fail; + } + if (new_memview) { + __Pyx_BufFmt_Init(&ctx, stack, dtype); + if (unlikely(!__Pyx_BufFmt_CheckString(&ctx, buf->format))) goto fail; + } + if (unlikely((unsigned) buf->itemsize != dtype->size)) { + PyErr_Format(PyExc_ValueError, + "Item size of buffer (%" CYTHON_FORMAT_SSIZE_T "u byte%s) " + "does not match size of '%s' (%" CYTHON_FORMAT_SSIZE_T "u byte%s)", + buf->itemsize, + (buf->itemsize > 1) ? 
"s" : "", + dtype->name, + dtype->size, + (dtype->size > 1) ? "s" : ""); + goto fail; + } + if (buf->len > 0) { + for (i = 0; i < ndim; i++) { + spec = axes_specs[i]; + if (unlikely(!__pyx_check_strides(buf, i, ndim, spec))) + goto fail; + if (unlikely(!__pyx_check_suboffsets(buf, i, ndim, spec))) + goto fail; + } + if (unlikely(buf->strides && !__pyx_verify_contig(buf, ndim, c_or_f_flag))) + goto fail; + } + if (unlikely(__Pyx_init_memviewslice(memview, ndim, memviewslice, + new_memview != NULL) == -1)) { + goto fail; + } + retval = 0; + goto no_fail; +fail: + Py_XDECREF(new_memview); + retval = -1; +no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsds_float(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, + PyBUF_RECORDS_RO | writable_flag, 2, + &__Pyx_TypeInfo_float, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsdsds_float(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, + PyBUF_RECORDS_RO | writable_flag, 4, + &__Pyx_TypeInfo_float, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_dsdsds_float(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED), (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; + int retcode; + if (obj == Py_None) { + result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, + PyBUF_RECORDS_RO | writable_flag, 3, + &__Pyx_TypeInfo_float, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* ObjectToMemviewSlice */ + static CYTHON_INLINE __Pyx_memviewslice __Pyx_PyObject_to_MemoryviewSlice_ds_int(PyObject *obj, int writable_flag) { + __Pyx_memviewslice result = { 0, 0, { 0 }, { 0 }, { 0 } }; + __Pyx_BufFmt_StackElem stack[1]; + int axes_specs[] = { (__Pyx_MEMVIEW_DIRECT | __Pyx_MEMVIEW_STRIDED) }; + int retcode; + if (obj == Py_None) { + 
result.memview = (struct __pyx_memoryview_obj *) Py_None; + return result; + } + retcode = __Pyx_ValidateAndInit_memviewslice(axes_specs, 0, + PyBUF_RECORDS_RO | writable_flag, 1, + &__Pyx_TypeInfo_int, stack, + &result, obj); + if (unlikely(retcode == -1)) + goto __pyx_fail; + return result; +__pyx_fail: + result.memview = NULL; + result.data = NULL; + return result; +} + +/* MemviewDtypeToObject */ + static CYTHON_INLINE PyObject *__pyx_memview_get_float(const char *itemp) { + return (PyObject *) PyFloat_FromDouble(*(float *) itemp); +} +static CYTHON_INLINE int __pyx_memview_set_float(const char *itemp, PyObject *obj) { + float value = __pyx_PyFloat_AsFloat(obj); + if (unlikely((value == (float)-1) && PyErr_Occurred())) + return 0; + *(float *) itemp = value; + return 1; +} + +/* CIntFromPyVerify */ + #define __PYX_VERIFY_RETURN_INT(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 0) +#define __PYX_VERIFY_RETURN_INT_EXC(target_type, func_type, func_value)\ + __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, 1) +#define __PYX__VERIFY_RETURN_INT(target_type, func_type, func_value, exc)\ + {\ + func_type value = func_value;\ + if (sizeof(target_type) < sizeof(func_type)) {\ + if (unlikely(value != (func_type) (target_type) value)) {\ + func_type zero = 0;\ + if (exc && unlikely(value == (func_type)-1 && PyErr_Occurred()))\ + return (target_type) -1;\ + if (is_unsigned && unlikely(value < zero))\ + goto raise_neg_overflow;\ + else\ + goto raise_overflow;\ + }\ + }\ + return (target_type) value;\ + } + +/* Declarations */ + #if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return ::std::complex< float >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + return x + y*(__pyx_t_float_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_float_complex __pyx_t_float_complex_from_parts(float x, float y) { + __pyx_t_float_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) +#else + static CYTHON_INLINE int __Pyx_c_eq_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_sum_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_diff_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_prod_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabsf(b.real) >= fabsf(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + float r = b.imag / b.real; + float s = (float)(1.0) / (b.real + b.imag * 
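+            /* Smith's algorithm: with r = b.imag/b.real, dividing through by b.real
+               avoids overflow in the naive |b|^2 denominator */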
r); + return __pyx_t_float_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + float r = b.real / b.imag; + float s = (float)(1.0) / (b.imag + b.real * r); + return __pyx_t_float_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_quot_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + if (b.imag == 0) { + return __pyx_t_float_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + float denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_float_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_neg_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_float(__pyx_t_float_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_conj_float(__pyx_t_float_complex a) { + __pyx_t_float_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE float __Pyx_c_abs_float(__pyx_t_float_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrtf(z.real*z.real + z.imag*z.imag); + #else + return hypotf(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_float_complex __Pyx_c_pow_float(__pyx_t_float_complex a, __pyx_t_float_complex b) { + __pyx_t_float_complex z; + float r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + float denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_float(a, a); + case 3: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, a); + case 4: + z = __Pyx_c_prod_float(a, a); + return __Pyx_c_prod_float(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if ((b.imag == 0) && (a.real >= 0)) { + z.real = powf(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = atan2f(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_float(a); + theta = atan2f(a.imag, a.real); + } + lnr = logf(r); + z_r = expf(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cosf(z_theta); + z.imag = z_r * sinf(z_theta); + return z; + } + #endif +#endif + +/* Declarations */ + #if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) + #ifdef __cplusplus + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return ::std::complex< double >(x, y); + } + #else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + return x + y*(__pyx_t_double_complex)_Complex_I; + } + #endif +#else + static CYTHON_INLINE __pyx_t_double_complex __pyx_t_double_complex_from_parts(double x, double y) { + __pyx_t_double_complex z; + z.real = x; + z.imag = y; + return z; + } +#endif + +/* Arithmetic */ + #if CYTHON_CCOMPLEX && (1) && (!0 || __cplusplus) +#else + static CYTHON_INLINE int __Pyx_c_eq_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + return (a.real == b.real) && (a.imag == b.imag); + } + static CYTHON_INLINE 
__pyx_t_double_complex __Pyx_c_sum_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real + b.real; + z.imag = a.imag + b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_diff_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real - b.real; + z.imag = a.imag - b.imag; + return z; + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_prod_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + z.real = a.real * b.real - a.imag * b.imag; + z.imag = a.real * b.imag + a.imag * b.real; + return z; + } + #if 1 + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else if (fabs(b.real) >= fabs(b.imag)) { + if (b.real == 0 && b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.imag); + } else { + double r = b.imag / b.real; + double s = (double)(1.0) / (b.real + b.imag * r); + return __pyx_t_double_complex_from_parts( + (a.real + a.imag * r) * s, (a.imag - a.real * r) * s); + } + } else { + double r = b.real / b.imag; + double s = (double)(1.0) / (b.imag + b.real * r); + return __pyx_t_double_complex_from_parts( + (a.real * r + a.imag) * s, (a.imag * r - a.real) * s); + } + } + #else + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_quot_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + if (b.imag == 0) { + return __pyx_t_double_complex_from_parts(a.real / b.real, a.imag / b.real); + } else { + double denom = b.real * b.real + b.imag * b.imag; + return __pyx_t_double_complex_from_parts( + (a.real * b.real + a.imag * b.imag) / denom, + (a.imag * b.real - a.real * b.imag) / denom); + } + } + #endif + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_neg_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = -a.real; + z.imag = -a.imag; + return z; + } + static CYTHON_INLINE int __Pyx_c_is_zero_double(__pyx_t_double_complex a) { + return (a.real == 0) && (a.imag == 0); + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_conj_double(__pyx_t_double_complex a) { + __pyx_t_double_complex z; + z.real = a.real; + z.imag = -a.imag; + return z; + } + #if 1 + static CYTHON_INLINE double __Pyx_c_abs_double(__pyx_t_double_complex z) { + #if !defined(HAVE_HYPOT) || defined(_MSC_VER) + return sqrt(z.real*z.real + z.imag*z.imag); + #else + return hypot(z.real, z.imag); + #endif + } + static CYTHON_INLINE __pyx_t_double_complex __Pyx_c_pow_double(__pyx_t_double_complex a, __pyx_t_double_complex b) { + __pyx_t_double_complex z; + double r, lnr, theta, z_r, z_theta; + if (b.imag == 0 && b.real == (int)b.real) { + if (b.real < 0) { + double denom = a.real * a.real + a.imag * a.imag; + a.real = a.real / denom; + a.imag = -a.imag / denom; + b.real = -b.real; + } + switch ((int)b.real) { + case 0: + z.real = 1; + z.imag = 0; + return z; + case 1: + return a; + case 2: + return __Pyx_c_prod_double(a, a); + case 3: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, a); + case 4: + z = __Pyx_c_prod_double(a, a); + return __Pyx_c_prod_double(z, z); + } + } + if (a.imag == 0) { + if (a.real == 0) { + return a; + } else if ((b.imag == 0) && (a.real >= 0)) { + z.real = pow(a.real, b.real); + z.imag = 0; + return z; + } else if (a.real > 0) { + r = a.real; + theta = 0; + } else { + r = -a.real; + theta = 
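+            /* negative real base: the angle is atan2(0, -1), i.e. pi */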
atan2(0.0, -1.0); + } + } else { + r = __Pyx_c_abs_double(a); + theta = atan2(a.imag, a.real); + } + lnr = log(r); + z_r = exp(lnr * b.real - theta * b.imag); + z_theta = theta * b.real + lnr * b.imag; + z.real = z_r * cos(z_theta); + z.imag = z_r * sin(z_theta); + return z; + } + #endif +#endif + +/* MemviewSliceCopyTemplate */ + static __Pyx_memviewslice +__pyx_memoryview_copy_new_contig(const __Pyx_memviewslice *from_mvs, + const char *mode, int ndim, + size_t sizeof_dtype, int contig_flag, + int dtype_is_object) +{ + __Pyx_RefNannyDeclarations + int i; + __Pyx_memviewslice new_mvs = { 0, 0, { 0 }, { 0 }, { 0 } }; + struct __pyx_memoryview_obj *from_memview = from_mvs->memview; + Py_buffer *buf = &from_memview->view; + PyObject *shape_tuple = NULL; + PyObject *temp_int = NULL; + struct __pyx_array_obj *array_obj = NULL; + struct __pyx_memoryview_obj *memview_obj = NULL; + __Pyx_RefNannySetupContext("__pyx_memoryview_copy_new_contig", 0); + for (i = 0; i < ndim; i++) { + if (unlikely(from_mvs->suboffsets[i] >= 0)) { + PyErr_Format(PyExc_ValueError, "Cannot copy memoryview slice with " + "indirect dimensions (axis %d)", i); + goto fail; + } + } + shape_tuple = PyTuple_New(ndim); + if (unlikely(!shape_tuple)) { + goto fail; + } + __Pyx_GOTREF(shape_tuple); + for(i = 0; i < ndim; i++) { + temp_int = PyInt_FromSsize_t(from_mvs->shape[i]); + if(unlikely(!temp_int)) { + goto fail; + } else { + PyTuple_SET_ITEM(shape_tuple, i, temp_int); + temp_int = NULL; + } + } + array_obj = __pyx_array_new(shape_tuple, sizeof_dtype, buf->format, (char *) mode, NULL); + if (unlikely(!array_obj)) { + goto fail; + } + __Pyx_GOTREF(array_obj); + memview_obj = (struct __pyx_memoryview_obj *) __pyx_memoryview_new( + (PyObject *) array_obj, contig_flag, + dtype_is_object, + from_mvs->memview->typeinfo); + if (unlikely(!memview_obj)) + goto fail; + if (unlikely(__Pyx_init_memviewslice(memview_obj, ndim, &new_mvs, 1) < 0)) + goto fail; + if (unlikely(__pyx_memoryview_copy_contents(*from_mvs, new_mvs, ndim, ndim, + dtype_is_object) < 0)) + goto fail; + goto no_fail; +fail: + __Pyx_XDECREF(new_mvs.memview); + new_mvs.memview = NULL; + new_mvs.data = NULL; +no_fail: + __Pyx_XDECREF(shape_tuple); + __Pyx_XDECREF(temp_int); + __Pyx_XDECREF(array_obj); + __Pyx_RefNannyFinishContext(); + return new_mvs; +} + +/* MemviewSliceInit */ + static int +__Pyx_init_memviewslice(struct __pyx_memoryview_obj *memview, + int ndim, + __Pyx_memviewslice *memviewslice, + int memview_is_new_reference) +{ + __Pyx_RefNannyDeclarations + int i, retval=-1; + Py_buffer *buf = &memview->view; + __Pyx_RefNannySetupContext("init_memviewslice", 0); + if (unlikely(memviewslice->memview || memviewslice->data)) { + PyErr_SetString(PyExc_ValueError, + "memviewslice is already initialized!"); + goto fail; + } + if (buf->strides) { + for (i = 0; i < ndim; i++) { + memviewslice->strides[i] = buf->strides[i]; + } + } else { + Py_ssize_t stride = buf->itemsize; + for (i = ndim - 1; i >= 0; i--) { + memviewslice->strides[i] = stride; + stride *= buf->shape[i]; + } + } + for (i = 0; i < ndim; i++) { + memviewslice->shape[i] = buf->shape[i]; + if (buf->suboffsets) { + memviewslice->suboffsets[i] = buf->suboffsets[i]; + } else { + memviewslice->suboffsets[i] = -1; + } + } + memviewslice->memview = memview; + memviewslice->data = (char *)buf->buf; + if (__pyx_add_acquisition_count(memview) == 0 && !memview_is_new_reference) { + Py_INCREF(memview); + } + retval = 0; + goto no_fail; +fail: + memviewslice->memview = 0; + memviewslice->data = 0; + retval = -1; 
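+    /* the slice stays zeroed on failure, so a later __Pyx_XCLEAR_MEMVIEW on it is a safe no-op */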
+no_fail: + __Pyx_RefNannyFinishContext(); + return retval; +} +#ifndef Py_NO_RETURN +#define Py_NO_RETURN +#endif +static void __pyx_fatalerror(const char *fmt, ...) Py_NO_RETURN { + va_list vargs; + char msg[200]; +#if PY_VERSION_HEX >= 0x030A0000 || defined(HAVE_STDARG_PROTOTYPES) + va_start(vargs, fmt); +#else + va_start(vargs); +#endif + vsnprintf(msg, 200, fmt, vargs); + va_end(vargs); + Py_FatalError(msg); +} +static CYTHON_INLINE int +__pyx_add_acquisition_count_locked(__pyx_atomic_int_type *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)++; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE int +__pyx_sub_acquisition_count_locked(__pyx_atomic_int_type *acquisition_count, + PyThread_type_lock lock) +{ + int result; + PyThread_acquire_lock(lock, 1); + result = (*acquisition_count)--; + PyThread_release_lock(lock); + return result; +} +static CYTHON_INLINE void +__Pyx_INC_MEMVIEW(__Pyx_memviewslice *memslice, int have_gil, int lineno) +{ + __pyx_nonatomic_int_type old_acquisition_count; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) { + return; + } + old_acquisition_count = __pyx_add_acquisition_count(memview); + if (unlikely(old_acquisition_count <= 0)) { + if (likely(old_acquisition_count == 0)) { + if (have_gil) { + Py_INCREF((PyObject *) memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_INCREF((PyObject *) memview); + PyGILState_Release(_gilstate); + } + } else { + __pyx_fatalerror("Acquisition count is %d (line %d)", + old_acquisition_count+1, lineno); + } + } +} +static CYTHON_INLINE void __Pyx_XCLEAR_MEMVIEW(__Pyx_memviewslice *memslice, + int have_gil, int lineno) { + __pyx_nonatomic_int_type old_acquisition_count; + struct __pyx_memoryview_obj *memview = memslice->memview; + if (unlikely(!memview || (PyObject *) memview == Py_None)) { + memslice->memview = NULL; + return; + } + old_acquisition_count = __pyx_sub_acquisition_count(memview); + memslice->data = NULL; + if (likely(old_acquisition_count > 1)) { + memslice->memview = NULL; + } else if (likely(old_acquisition_count == 1)) { + if (have_gil) { + Py_CLEAR(memslice->memview); + } else { + PyGILState_STATE _gilstate = PyGILState_Ensure(); + Py_CLEAR(memslice->memview); + PyGILState_Release(_gilstate); + } + } else { + __pyx_fatalerror("Acquisition count is %d (line %d)", + old_acquisition_count-1, lineno); + } +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_long(long value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(long) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(long) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(long) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(long) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = 
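+        /* runtime endianness probe: the first byte of (int)1 is 1 only on little-endian machines */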
(int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; +#if !CYTHON_COMPILING_IN_LIMITED_API + return _PyLong_FromByteArray(bytes, sizeof(long), + little, !is_unsigned); +#else + PyObject *from_bytes, *result = NULL; + PyObject *py_bytes = NULL, *arg_tuple = NULL, *kwds = NULL, *order_str = NULL; + from_bytes = PyObject_GetAttrString((PyObject*)&PyInt_Type, "from_bytes"); + if (!from_bytes) return NULL; + py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(long)); + if (!py_bytes) goto limited_bad; + order_str = PyUnicode_FromString(little ? "little" : "big"); + if (!order_str) goto limited_bad; + arg_tuple = PyTuple_Pack(2, py_bytes, order_str); + if (!arg_tuple) goto limited_bad; + kwds = PyDict_New(); + if (!kwds) goto limited_bad; + if (PyDict_SetItemString(kwds, "signed", __Pyx_NewRef(!is_unsigned ? Py_True : Py_False))) goto limited_bad; + result = PyObject_Call(from_bytes, arg_tuple, kwds); + limited_bad: + Py_XDECREF(from_bytes); + Py_XDECREF(py_bytes); + Py_XDECREF(order_str); + Py_XDECREF(arg_tuple); + Py_XDECREF(kwds); + return result; +#endif + } +} + +/* CIntToPy */ + static CYTHON_INLINE PyObject* __Pyx_PyInt_From_int(int value) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; + if (is_unsigned) { + if (sizeof(int) < sizeof(long)) { + return PyInt_FromLong((long) value); + } else if (sizeof(int) <= sizeof(unsigned long)) { + return PyLong_FromUnsignedLong((unsigned long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(unsigned PY_LONG_LONG)) { + return PyLong_FromUnsignedLongLong((unsigned PY_LONG_LONG) value); +#endif + } + } else { + if (sizeof(int) <= sizeof(long)) { + return PyInt_FromLong((long) value); +#ifdef HAVE_LONG_LONG + } else if (sizeof(int) <= sizeof(PY_LONG_LONG)) { + return PyLong_FromLongLong((PY_LONG_LONG) value); +#endif + } + } + { + int one = 1; int little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&value; +#if !CYTHON_COMPILING_IN_LIMITED_API + return _PyLong_FromByteArray(bytes, sizeof(int), + little, !is_unsigned); +#else + PyObject *from_bytes, *result = NULL; + PyObject *py_bytes = NULL, *arg_tuple = NULL, *kwds = NULL, *order_str = NULL; + from_bytes = PyObject_GetAttrString((PyObject*)&PyInt_Type, "from_bytes"); + if (!from_bytes) return NULL; + py_bytes = PyBytes_FromStringAndSize((char*)bytes, sizeof(int)); + if (!py_bytes) goto limited_bad; + order_str = PyUnicode_FromString(little ? "little" : "big"); + if (!order_str) goto limited_bad; + arg_tuple = PyTuple_Pack(2, py_bytes, order_str); + if (!arg_tuple) goto limited_bad; + kwds = PyDict_New(); + if (!kwds) goto limited_bad; + if (PyDict_SetItemString(kwds, "signed", __Pyx_NewRef(!is_unsigned ? 
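+        /* the 'signed' keyword argument for int.from_bytes under the Limited API */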
Py_True : Py_False))) goto limited_bad; + result = PyObject_Call(from_bytes, arg_tuple, kwds); + limited_bad: + Py_XDECREF(from_bytes); + Py_XDECREF(py_bytes); + Py_XDECREF(order_str); + Py_XDECREF(arg_tuple); + Py_XDECREF(kwds); + return result; +#endif + } +} + +/* CIntFromPy */ + static CYTHON_INLINE int __Pyx_PyInt_As_int(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const int neg_one = (int) -1, const_zero = (int) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if ((sizeof(int) < sizeof(long))) { + __PYX_VERIFY_RETURN_INT(int, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (int) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + if (unlikely(__Pyx_PyLong_IsNeg(x))) { + goto raise_neg_overflow; + } else if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_DigitCount(x)) { + case 2: + if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 2 * PyLong_SHIFT)) { + return (int) (((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 3: + if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 3 * PyLong_SHIFT)) { + return (int) (((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + case 4: + if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) >= 4 * PyLong_SHIFT)) { + return (int) (((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0])); + } + } + break; + } + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 + if (unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if ((sizeof(int) <= sizeof(unsigned long))) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(int) <= sizeof(unsigned PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(int, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(int, __Pyx_compact_pylong, 
__Pyx_PyLong_CompactValue(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_SignedDigitCount(x)) { + case -2: + if ((8 * sizeof(int) - 1 > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 2: + if ((8 * sizeof(int) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + return (int) ((((((int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -3: + if ((8 * sizeof(int) - 1 > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 3: + if ((8 * sizeof(int) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + return (int) ((((((((int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case -4: + if ((8 * sizeof(int) - 1 > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { + return (int) (((int)-1)*(((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + case 4: + if ((8 * sizeof(int) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(int, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(int) - 1 > 4 * PyLong_SHIFT)) { + return (int) ((((((((((int)digits[3]) << PyLong_SHIFT) | (int)digits[2]) << PyLong_SHIFT) | (int)digits[1]) << PyLong_SHIFT) | (int)digits[0]))); + } + } + break; + } + } +#endif + if ((sizeof(int) <= sizeof(long))) { + __PYX_VERIFY_RETURN_INT_EXC(int, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(int) <= sizeof(PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(int, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { + int val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); +#if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } +#endif + if (likely(v)) { + int ret = -1; +#if 
!(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); +#else + PyObject *stepval = NULL, *mask = NULL, *shift = NULL; + int bits, remaining_bits, is_negative = 0; + long idigit; + int chunk_size = (sizeof(long) < 8) ? 30 : 62; + if (unlikely(!PyLong_CheckExact(v))) { + PyObject *tmp = v; + v = PyNumber_Long(v); + assert(PyLong_CheckExact(v)); + Py_DECREF(tmp); + if (unlikely(!v)) return (int) -1; + } +#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + if (Py_SIZE(x) == 0) + return (int) 0; + is_negative = Py_SIZE(x) < 0; +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (int) -1; + is_negative = result == 1; + } +#endif + if (is_unsigned && unlikely(is_negative)) { + goto raise_neg_overflow; + } else if (is_negative) { + stepval = PyNumber_Invert(v); + if (unlikely(!stepval)) + return (int) -1; + } else { + stepval = __Pyx_NewRef(v); + } + val = (int) 0; + mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; + shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; + for (bits = 0; bits < (int) sizeof(int) * 8 - chunk_size; bits += chunk_size) { + PyObject *tmp, *digit; + digit = PyNumber_And(stepval, mask); + if (unlikely(!digit)) goto done; + idigit = PyLong_AsLong(digit); + Py_DECREF(digit); + if (unlikely(idigit < 0)) goto done; + tmp = PyNumber_Rshift(stepval, shift); + if (unlikely(!tmp)) goto done; + Py_DECREF(stepval); stepval = tmp; + val |= ((int) idigit) << bits; + #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + if (Py_SIZE(stepval) == 0) + goto unpacking_done; + #endif + } + idigit = PyLong_AsLong(stepval); + if (unlikely(idigit < 0)) goto done; + remaining_bits = ((int) sizeof(int) * 8) - bits - (is_unsigned ? 
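+      /* signed targets reserve one bit for the sign */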
0 : 1); + if (unlikely(idigit >= (1L << remaining_bits))) + goto raise_overflow; + val |= ((int) idigit) << bits; + #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + unpacking_done: + #endif + if (!is_unsigned) { + if (unlikely(val & (((int) 1) << (sizeof(int) * 8 - 1)))) + goto raise_overflow; + if (is_negative) + val = ~val; + } + ret = 0; + done: + Py_XDECREF(shift); + Py_XDECREF(mask); + Py_XDECREF(stepval); +#endif + Py_DECREF(v); + if (likely(!ret)) + return val; + } + return (int) -1; + } + } else { + int val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (int) -1; + val = __Pyx_PyInt_As_int(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to int"); + return (int) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to int"); + return (int) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE long __Pyx_PyInt_As_long(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const long neg_one = (long) -1, const_zero = (long) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if ((sizeof(long) < sizeof(long))) { + __PYX_VERIFY_RETURN_INT(long, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (long) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + if (unlikely(__Pyx_PyLong_IsNeg(x))) { + goto raise_neg_overflow; + } else if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_DigitCount(x)) { + case 2: + if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 2 * PyLong_SHIFT)) { + return (long) (((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 3: + if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 3 * PyLong_SHIFT)) { + return (long) (((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + case 4: + if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) >= 4 * PyLong_SHIFT)) { + return (long) (((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0])); + } + } + break; + } + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 + if (unlikely(Py_SIZE(x) 
< 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if ((sizeof(long) <= sizeof(unsigned long))) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(long) <= sizeof(unsigned PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(long, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(long, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_SignedDigitCount(x)) { + case -2: + if ((8 * sizeof(long) - 1 > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + return (long) (((long)-1)*(((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 2: + if ((8 * sizeof(long) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + return (long) ((((((long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -3: + if ((8 * sizeof(long) - 1 > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + return (long) (((long)-1)*(((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 3: + if ((8 * sizeof(long) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + return (long) ((((((((long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case -4: + if ((8 * sizeof(long) - 1 > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 4 * PyLong_SHIFT)) { + return (long) (((long)-1)*(((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + case 4: + if ((8 * sizeof(long) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(long, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(long) - 1 > 4 * 
PyLong_SHIFT)) { + return (long) ((((((((((long)digits[3]) << PyLong_SHIFT) | (long)digits[2]) << PyLong_SHIFT) | (long)digits[1]) << PyLong_SHIFT) | (long)digits[0]))); + } + } + break; + } + } +#endif + if ((sizeof(long) <= sizeof(long))) { + __PYX_VERIFY_RETURN_INT_EXC(long, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(long) <= sizeof(PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(long, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { + long val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); +#if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } +#endif + if (likely(v)) { + int ret = -1; +#if !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); +#else + PyObject *stepval = NULL, *mask = NULL, *shift = NULL; + int bits, remaining_bits, is_negative = 0; + long idigit; + int chunk_size = (sizeof(long) < 8) ? 30 : 62; + if (unlikely(!PyLong_CheckExact(v))) { + PyObject *tmp = v; + v = PyNumber_Long(v); + assert(PyLong_CheckExact(v)); + Py_DECREF(tmp); + if (unlikely(!v)) return (long) -1; + } +#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + if (Py_SIZE(x) == 0) + return (long) 0; + is_negative = Py_SIZE(x) < 0; +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (long) -1; + is_negative = result == 1; + } +#endif + if (is_unsigned && unlikely(is_negative)) { + goto raise_neg_overflow; + } else if (is_negative) { + stepval = PyNumber_Invert(v); + if (unlikely(!stepval)) + return (long) -1; + } else { + stepval = __Pyx_NewRef(v); + } + val = (long) 0; + mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; + shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; + for (bits = 0; bits < (int) sizeof(long) * 8 - chunk_size; bits += chunk_size) { + PyObject *tmp, *digit; + digit = PyNumber_And(stepval, mask); + if (unlikely(!digit)) goto done; + idigit = PyLong_AsLong(digit); + Py_DECREF(digit); + if (unlikely(idigit < 0)) goto done; + tmp = PyNumber_Rshift(stepval, shift); + if (unlikely(!tmp)) goto done; + Py_DECREF(stepval); stepval = tmp; + val |= ((long) idigit) << bits; + #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + if (Py_SIZE(stepval) == 0) + goto unpacking_done; + #endif + } + idigit = PyLong_AsLong(stepval); + if (unlikely(idigit < 0)) goto done; + remaining_bits = ((int) sizeof(long) * 8) - bits - (is_unsigned ? 
0 : 1); + if (unlikely(idigit >= (1L << remaining_bits))) + goto raise_overflow; + val |= ((long) idigit) << bits; + #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + unpacking_done: + #endif + if (!is_unsigned) { + if (unlikely(val & (((long) 1) << (sizeof(long) * 8 - 1)))) + goto raise_overflow; + if (is_negative) + val = ~val; + } + ret = 0; + done: + Py_XDECREF(shift); + Py_XDECREF(mask); + Py_XDECREF(stepval); +#endif + Py_DECREF(v); + if (likely(!ret)) + return val; + } + return (long) -1; + } + } else { + long val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (long) -1; + val = __Pyx_PyInt_As_long(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to long"); + return (long) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to long"); + return (long) -1; +} + +/* CIntFromPy */ + static CYTHON_INLINE char __Pyx_PyInt_As_char(PyObject *x) { +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic push +#pragma GCC diagnostic ignored "-Wconversion" +#endif + const char neg_one = (char) -1, const_zero = (char) 0; +#ifdef __Pyx_HAS_GCC_DIAGNOSTIC +#pragma GCC diagnostic pop +#endif + const int is_unsigned = neg_one > const_zero; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x))) { + if ((sizeof(char) < sizeof(long))) { + __PYX_VERIFY_RETURN_INT(char, long, PyInt_AS_LONG(x)) + } else { + long val = PyInt_AS_LONG(x); + if (is_unsigned && unlikely(val < 0)) { + goto raise_neg_overflow; + } + return (char) val; + } + } else +#endif + if (likely(PyLong_Check(x))) { + if (is_unsigned) { +#if CYTHON_USE_PYLONG_INTERNALS + if (unlikely(__Pyx_PyLong_IsNeg(x))) { + goto raise_neg_overflow; + } else if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(char, __Pyx_compact_upylong, __Pyx_PyLong_CompactValueUnsigned(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_DigitCount(x)) { + case 2: + if ((8 * sizeof(char) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(char) >= 2 * PyLong_SHIFT)) { + return (char) (((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 3: + if ((8 * sizeof(char) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(char) >= 3 * PyLong_SHIFT)) { + return (char) (((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + case 4: + if ((8 * sizeof(char) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(char) >= 4 * PyLong_SHIFT)) { + return (char) (((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0])); + } + } + break; + } + } +#endif +#if CYTHON_COMPILING_IN_CPYTHON && PY_VERSION_HEX < 0x030C00A7 + if 
(unlikely(Py_SIZE(x) < 0)) { + goto raise_neg_overflow; + } +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (char) -1; + if (unlikely(result == 1)) + goto raise_neg_overflow; + } +#endif + if ((sizeof(char) <= sizeof(unsigned long))) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned long, PyLong_AsUnsignedLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(char) <= sizeof(unsigned PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(char, unsigned PY_LONG_LONG, PyLong_AsUnsignedLongLong(x)) +#endif + } + } else { +#if CYTHON_USE_PYLONG_INTERNALS + if (__Pyx_PyLong_IsCompact(x)) { + __PYX_VERIFY_RETURN_INT(char, __Pyx_compact_pylong, __Pyx_PyLong_CompactValue(x)) + } else { + const digit* digits = __Pyx_PyLong_Digits(x); + assert(__Pyx_PyLong_DigitCount(x) > 1); + switch (__Pyx_PyLong_SignedDigitCount(x)) { + case -2: + if ((8 * sizeof(char) - 1 > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) { + return (char) (((char)-1)*(((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 2: + if ((8 * sizeof(char) > 1 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 2 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) { + return (char) ((((((char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -3: + if ((8 * sizeof(char) - 1 > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(char) - 1 > 3 * PyLong_SHIFT)) { + return (char) (((char)-1)*(((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 3: + if ((8 * sizeof(char) > 2 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 3 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(char) - 1 > 3 * PyLong_SHIFT)) { + return (char) ((((((((char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case -4: + if ((8 * sizeof(char) - 1 > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, long, -(long) (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * sizeof(char) - 1 > 4 * PyLong_SHIFT)) { + return (char) (((char)-1)*(((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + case 4: + if ((8 * sizeof(char) > 3 * PyLong_SHIFT)) { + if ((8 * sizeof(unsigned long) > 4 * PyLong_SHIFT)) { + __PYX_VERIFY_RETURN_INT(char, unsigned long, (((((((((unsigned long)digits[3]) << PyLong_SHIFT) | (unsigned long)digits[2]) << PyLong_SHIFT) | (unsigned long)digits[1]) << PyLong_SHIFT) | (unsigned long)digits[0]))) + } else if ((8 * 
sizeof(char) - 1 > 4 * PyLong_SHIFT)) { + return (char) ((((((((((char)digits[3]) << PyLong_SHIFT) | (char)digits[2]) << PyLong_SHIFT) | (char)digits[1]) << PyLong_SHIFT) | (char)digits[0]))); + } + } + break; + } + } +#endif + if ((sizeof(char) <= sizeof(long))) { + __PYX_VERIFY_RETURN_INT_EXC(char, long, PyLong_AsLong(x)) +#ifdef HAVE_LONG_LONG + } else if ((sizeof(char) <= sizeof(PY_LONG_LONG))) { + __PYX_VERIFY_RETURN_INT_EXC(char, PY_LONG_LONG, PyLong_AsLongLong(x)) +#endif + } + } + { + char val; + PyObject *v = __Pyx_PyNumber_IntOrLong(x); +#if PY_MAJOR_VERSION < 3 + if (likely(v) && !PyLong_Check(v)) { + PyObject *tmp = v; + v = PyNumber_Long(tmp); + Py_DECREF(tmp); + } +#endif + if (likely(v)) { + int ret = -1; +#if !(CYTHON_COMPILING_IN_PYPY || CYTHON_COMPILING_IN_LIMITED_API) || defined(_PyLong_AsByteArray) + int one = 1; int is_little = (int)*(unsigned char *)&one; + unsigned char *bytes = (unsigned char *)&val; + ret = _PyLong_AsByteArray((PyLongObject *)v, + bytes, sizeof(val), + is_little, !is_unsigned); +#else + PyObject *stepval = NULL, *mask = NULL, *shift = NULL; + int bits, remaining_bits, is_negative = 0; + long idigit; + int chunk_size = (sizeof(long) < 8) ? 30 : 62; + if (unlikely(!PyLong_CheckExact(v))) { + PyObject *tmp = v; + v = PyNumber_Long(v); + assert(PyLong_CheckExact(v)); + Py_DECREF(tmp); + if (unlikely(!v)) return (char) -1; + } +#if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + if (Py_SIZE(x) == 0) + return (char) 0; + is_negative = Py_SIZE(x) < 0; +#else + { + int result = PyObject_RichCompareBool(x, Py_False, Py_LT); + if (unlikely(result < 0)) + return (char) -1; + is_negative = result == 1; + } +#endif + if (is_unsigned && unlikely(is_negative)) { + goto raise_neg_overflow; + } else if (is_negative) { + stepval = PyNumber_Invert(v); + if (unlikely(!stepval)) + return (char) -1; + } else { + stepval = __Pyx_NewRef(v); + } + val = (char) 0; + mask = PyLong_FromLong((1L << chunk_size) - 1); if (unlikely(!mask)) goto done; + shift = PyLong_FromLong(chunk_size); if (unlikely(!shift)) goto done; + for (bits = 0; bits < (int) sizeof(char) * 8 - chunk_size; bits += chunk_size) { + PyObject *tmp, *digit; + digit = PyNumber_And(stepval, mask); + if (unlikely(!digit)) goto done; + idigit = PyLong_AsLong(digit); + Py_DECREF(digit); + if (unlikely(idigit < 0)) goto done; + tmp = PyNumber_Rshift(stepval, shift); + if (unlikely(!tmp)) goto done; + Py_DECREF(stepval); stepval = tmp; + val |= ((char) idigit) << bits; + #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + if (Py_SIZE(stepval) == 0) + goto unpacking_done; + #endif + } + idigit = PyLong_AsLong(stepval); + if (unlikely(idigit < 0)) goto done; + remaining_bits = ((int) sizeof(char) * 8) - bits - (is_unsigned ? 
0 : 1); + if (unlikely(idigit >= (1L << remaining_bits))) + goto raise_overflow; + val |= ((char) idigit) << bits; + #if CYTHON_COMPILING_IN_LIMITED_API && PY_VERSION_HEX < 0x030B0000 + unpacking_done: + #endif + if (!is_unsigned) { + if (unlikely(val & (((char) 1) << (sizeof(char) * 8 - 1)))) + goto raise_overflow; + if (is_negative) + val = ~val; + } + ret = 0; + done: + Py_XDECREF(shift); + Py_XDECREF(mask); + Py_XDECREF(stepval); +#endif + Py_DECREF(v); + if (likely(!ret)) + return val; + } + return (char) -1; + } + } else { + char val; + PyObject *tmp = __Pyx_PyNumber_IntOrLong(x); + if (!tmp) return (char) -1; + val = __Pyx_PyInt_As_char(tmp); + Py_DECREF(tmp); + return val; + } +raise_overflow: + PyErr_SetString(PyExc_OverflowError, + "value too large to convert to char"); + return (char) -1; +raise_neg_overflow: + PyErr_SetString(PyExc_OverflowError, + "can't convert negative value to char"); + return (char) -1; +} + +/* FormatTypeName */ + #if CYTHON_COMPILING_IN_LIMITED_API +static __Pyx_TypeName +__Pyx_PyType_GetName(PyTypeObject* tp) +{ + PyObject *name = __Pyx_PyObject_GetAttrStr((PyObject *)tp, + __pyx_n_s_name_2); + if (unlikely(name == NULL) || unlikely(!PyUnicode_Check(name))) { + PyErr_Clear(); + Py_XDECREF(name); + name = __Pyx_NewRef(__pyx_n_s__30); + } + return name; +} +#endif + +/* CheckBinaryVersion */ + static int __Pyx_check_binary_version(void) { + char ctversion[5]; + int same=1, i, found_dot; + const char* rt_from_call = Py_GetVersion(); + PyOS_snprintf(ctversion, 5, "%d.%d", PY_MAJOR_VERSION, PY_MINOR_VERSION); + found_dot = 0; + for (i = 0; i < 4; i++) { + if (!ctversion[i]) { + same = (rt_from_call[i] < '0' || rt_from_call[i] > '9'); + break; + } + if (rt_from_call[i] != ctversion[i]) { + same = 0; + break; + } + } + if (!same) { + char rtversion[5] = {'\0'}; + char message[200]; + for (i=0; i<4; ++i) { + if (rt_from_call[i] == '.') { + if (found_dot) break; + found_dot = 1; + } else if (rt_from_call[i] < '0' || rt_from_call[i] > '9') { + break; + } + rtversion[i] = rt_from_call[i]; + } + PyOS_snprintf(message, sizeof(message), + "compile time version %s of module '%.100s' " + "does not match runtime version %s", + ctversion, __Pyx_MODULE_NAME, rtversion); + return PyErr_WarnEx(NULL, message, 1); + } + return 0; +} + +/* InitStrings */ + #if PY_MAJOR_VERSION >= 3 +static int __Pyx_InitString(__Pyx_StringTabEntry t, PyObject **str) { + if (t.is_unicode | t.is_str) { + if (t.intern) { + *str = PyUnicode_InternFromString(t.s); + } else if (t.encoding) { + *str = PyUnicode_Decode(t.s, t.n - 1, t.encoding, NULL); + } else { + *str = PyUnicode_FromStringAndSize(t.s, t.n - 1); + } + } else { + *str = PyBytes_FromStringAndSize(t.s, t.n - 1); + } + if (!*str) + return -1; + if (PyObject_Hash(*str) == -1) + return -1; + return 0; +} +#endif +static int __Pyx_InitStrings(__Pyx_StringTabEntry *t) { + while (t->p) { + #if PY_MAJOR_VERSION >= 3 + __Pyx_InitString(*t, t->p); + #else + if (t->is_unicode) { + *t->p = PyUnicode_DecodeUTF8(t->s, t->n - 1, NULL); + } else if (t->intern) { + *t->p = PyString_InternFromString(t->s); + } else { + *t->p = PyString_FromStringAndSize(t->s, t->n - 1); + } + if (!*t->p) + return -1; + if (PyObject_Hash(*t->p) == -1) + return -1; + #endif + ++t; + } + return 0; +} + +static CYTHON_INLINE PyObject* __Pyx_PyUnicode_FromString(const char* c_str) { + return __Pyx_PyUnicode_FromStringAndSize(c_str, (Py_ssize_t)strlen(c_str)); +} +static CYTHON_INLINE const char* __Pyx_PyObject_AsString(PyObject* o) { + Py_ssize_t ignore; + return 
__Pyx_PyObject_AsStringAndSize(o, &ignore); +} +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT +#if !CYTHON_PEP393_ENABLED +static const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + char* defenc_c; + PyObject* defenc = _PyUnicode_AsDefaultEncodedString(o, NULL); + if (!defenc) return NULL; + defenc_c = PyBytes_AS_STRING(defenc); +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + { + char* end = defenc_c + PyBytes_GET_SIZE(defenc); + char* c; + for (c = defenc_c; c < end; c++) { + if ((unsigned char) (*c) >= 128) { + PyUnicode_AsASCIIString(o); + return NULL; + } + } + } +#endif + *length = PyBytes_GET_SIZE(defenc); + return defenc_c; +} +#else +static CYTHON_INLINE const char* __Pyx_PyUnicode_AsStringAndSize(PyObject* o, Py_ssize_t *length) { + if (unlikely(__Pyx_PyUnicode_READY(o) == -1)) return NULL; +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + if (likely(PyUnicode_IS_ASCII(o))) { + *length = PyUnicode_GET_LENGTH(o); + return PyUnicode_AsUTF8(o); + } else { + PyUnicode_AsASCIIString(o); + return NULL; + } +#else + return PyUnicode_AsUTF8AndSize(o, length); +#endif +} +#endif +#endif +static CYTHON_INLINE const char* __Pyx_PyObject_AsStringAndSize(PyObject* o, Py_ssize_t *length) { +#if __PYX_DEFAULT_STRING_ENCODING_IS_ASCII || __PYX_DEFAULT_STRING_ENCODING_IS_DEFAULT + if ( +#if PY_MAJOR_VERSION < 3 && __PYX_DEFAULT_STRING_ENCODING_IS_ASCII + __Pyx_sys_getdefaultencoding_not_ascii && +#endif + PyUnicode_Check(o)) { + return __Pyx_PyUnicode_AsStringAndSize(o, length); + } else +#endif +#if (!CYTHON_COMPILING_IN_PYPY && !CYTHON_COMPILING_IN_LIMITED_API) || (defined(PyByteArray_AS_STRING) && defined(PyByteArray_GET_SIZE)) + if (PyByteArray_Check(o)) { + *length = PyByteArray_GET_SIZE(o); + return PyByteArray_AS_STRING(o); + } else +#endif + { + char* result; + int r = PyBytes_AsStringAndSize(o, &result, length); + if (unlikely(r < 0)) { + return NULL; + } else { + return result; + } + } +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrue(PyObject* x) { + int is_true = x == Py_True; + if (is_true | (x == Py_False) | (x == Py_None)) return is_true; + else return PyObject_IsTrue(x); +} +static CYTHON_INLINE int __Pyx_PyObject_IsTrueAndDecref(PyObject* x) { + int retval; + if (unlikely(!x)) return -1; + retval = __Pyx_PyObject_IsTrue(x); + Py_DECREF(x); + return retval; +} +static PyObject* __Pyx_PyNumber_IntOrLongWrongResultType(PyObject* result, const char* type_name) { + __Pyx_TypeName result_type_name = __Pyx_PyType_GetName(Py_TYPE(result)); +#if PY_MAJOR_VERSION >= 3 + if (PyLong_Check(result)) { + if (PyErr_WarnFormat(PyExc_DeprecationWarning, 1, + "__int__ returned non-int (type " __Pyx_FMT_TYPENAME "). 
" + "The ability to return an instance of a strict subclass of int is deprecated, " + "and may be removed in a future version of Python.", + result_type_name)) { + __Pyx_DECREF_TypeName(result_type_name); + Py_DECREF(result); + return NULL; + } + __Pyx_DECREF_TypeName(result_type_name); + return result; + } +#endif + PyErr_Format(PyExc_TypeError, + "__%.4s__ returned non-%.4s (type " __Pyx_FMT_TYPENAME ")", + type_name, type_name, result_type_name); + __Pyx_DECREF_TypeName(result_type_name); + Py_DECREF(result); + return NULL; +} +static CYTHON_INLINE PyObject* __Pyx_PyNumber_IntOrLong(PyObject* x) { +#if CYTHON_USE_TYPE_SLOTS + PyNumberMethods *m; +#endif + const char *name = NULL; + PyObject *res = NULL; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_Check(x) || PyLong_Check(x))) +#else + if (likely(PyLong_Check(x))) +#endif + return __Pyx_NewRef(x); +#if CYTHON_USE_TYPE_SLOTS + m = Py_TYPE(x)->tp_as_number; + #if PY_MAJOR_VERSION < 3 + if (m && m->nb_int) { + name = "int"; + res = m->nb_int(x); + } + else if (m && m->nb_long) { + name = "long"; + res = m->nb_long(x); + } + #else + if (likely(m && m->nb_int)) { + name = "int"; + res = m->nb_int(x); + } + #endif +#else + if (!PyBytes_CheckExact(x) && !PyUnicode_CheckExact(x)) { + res = PyNumber_Int(x); + } +#endif + if (likely(res)) { +#if PY_MAJOR_VERSION < 3 + if (unlikely(!PyInt_Check(res) && !PyLong_Check(res))) { +#else + if (unlikely(!PyLong_CheckExact(res))) { +#endif + return __Pyx_PyNumber_IntOrLongWrongResultType(res, name); + } + } + else if (!PyErr_Occurred()) { + PyErr_SetString(PyExc_TypeError, + "an integer is required"); + } + return res; +} +static CYTHON_INLINE Py_ssize_t __Pyx_PyIndex_AsSsize_t(PyObject* b) { + Py_ssize_t ival; + PyObject *x; +#if PY_MAJOR_VERSION < 3 + if (likely(PyInt_CheckExact(b))) { + if (sizeof(Py_ssize_t) >= sizeof(long)) + return PyInt_AS_LONG(b); + else + return PyInt_AsSsize_t(b); + } +#endif + if (likely(PyLong_CheckExact(b))) { + #if CYTHON_USE_PYLONG_INTERNALS + if (likely(__Pyx_PyLong_IsCompact(b))) { + return __Pyx_PyLong_CompactValue(b); + } else { + const digit* digits = __Pyx_PyLong_Digits(b); + const Py_ssize_t size = __Pyx_PyLong_SignedDigitCount(b); + switch (size) { + case 2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return (Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -2: + if (8 * sizeof(Py_ssize_t) > 2 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -3: + if (8 * sizeof(Py_ssize_t) > 3 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case 4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return (Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + case -4: + if (8 * sizeof(Py_ssize_t) > 4 * PyLong_SHIFT) { + return -(Py_ssize_t) (((((((((size_t)digits[3]) << PyLong_SHIFT) | (size_t)digits[2]) << PyLong_SHIFT) | (size_t)digits[1]) << PyLong_SHIFT) | (size_t)digits[0])); + } + break; + } + } + #endif + return PyLong_AsSsize_t(b); + } + x = PyNumber_Index(b); + if (!x) return -1; + ival = PyInt_AsSsize_t(x); + Py_DECREF(x); + 
return ival; +} +static CYTHON_INLINE Py_hash_t __Pyx_PyIndex_AsHash_t(PyObject* o) { + if (sizeof(Py_hash_t) == sizeof(Py_ssize_t)) { + return (Py_hash_t) __Pyx_PyIndex_AsSsize_t(o); +#if PY_MAJOR_VERSION < 3 + } else if (likely(PyInt_CheckExact(o))) { + return PyInt_AS_LONG(o); +#endif + } else { + Py_ssize_t ival; + PyObject *x; + x = PyNumber_Index(o); + if (!x) return -1; + ival = PyInt_AsLong(x); + Py_DECREF(x); + return ival; + } +} +static CYTHON_INLINE PyObject * __Pyx_PyBool_FromLong(long b) { + return b ? __Pyx_NewRef(Py_True) : __Pyx_NewRef(Py_False); +} +static CYTHON_INLINE PyObject * __Pyx_PyInt_FromSize_t(size_t ival) { + return PyInt_FromSize_t(ival); +} + + +/* #### Code section: utility_code_pragmas_end ### */ +#ifdef _MSC_VER +#pragma warning( pop ) +#endif + + + +/* #### Code section: end ### */ +#endif /* Py_PYTHON_H */ diff --git a/models/LL3DA/utils/box_intersection.pyx b/models/LL3DA/utils/box_intersection.pyx new file mode 100644 index 0000000..f40a78f --- /dev/null +++ b/models/LL3DA/utils/box_intersection.pyx @@ -0,0 +1,201 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +import numpy as np + +cimport cython +cimport numpy as np + + +cdef bint boolean_variable = True +np.import_array() + + +FLOAT = np.float32 + +@cython.boundscheck(False) +@cython.wraparound(False) +def computeIntersection(cp1, cp2, s, e): + dc = [ cp1[0] - cp2[0], cp1[1] - cp2[1] ] + dp = [ s[0] - e[0], s[1] - e[1] ] + n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + n2 = s[0] * e[1] - s[1] * e[0] + n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3] + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline bint inside(cp1, cp2, p): + return(cp2[0]-cp1[0])*(p[1]-cp1[1]) > (cp2[1]-cp1[1])*(p[0]-cp1[0]) + +@cython.boundscheck(False) +def polygon_clip_unnest(float [:, :] subjectPolygon, float [:, :] clipPolygon): + """ Clip a polygon with another polygon. + + Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python + + Args: + subjectPolygon: a list of (x,y) 2d points, any polygon. + clipPolygon: a list of (x,y) 2d points, has to be *convex* + Note: + **points have to be counter-clockwise ordered** + + Return: + a list of (x,y) vertex point for the intersection polygon. 
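+
+    Example (an illustrative sketch, not an original doctest; float32 inputs
+    are assumed because of the typed memoryview signature):
+
+        import numpy as np
+        sq1 = np.array([[0, 0], [2, 0], [2, 2], [0, 2]], dtype=np.float32)
+        sq2 = np.array([[1, 1], [3, 1], [3, 3], [1, 3]], dtype=np.float32)
+        inter = polygon_clip_unnest(sq1, sq2)  # unit square [1, 2] x [1, 2]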
+ """ + outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])] + cp1 = clipPolygon[-1] + cdef int lenc = len(clipPolygon) + cdef int iidx = 0 + + # for clipVertex in clipPolygon: + for cidx in range(lenc): + clipVertex = clipPolygon[cidx] + cp2 = clipVertex + inputList = outputList.copy() + outputList.clear() + s = inputList[-1] + + inc = len(inputList) + + # for subjectVertex in inputList: + for iidx in range(inc): + subjectVertex = inputList[iidx] + e = subjectVertex + if inside(cp1, cp2, e): + if not inside(cp1, cp2, s): + outputList.append(computeIntersection(cp1, cp2, s, e)) + outputList.append(e) + elif inside(cp1, cp2, s): + outputList.append(computeIntersection(cp1, cp2, s, e)) + s = e + cp1 = cp2 + if len(outputList) == 0: + break + return outputList + + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef void copy_points(float[:, :] src, float[:, :] dst, Py_ssize_t num_points): + cdef Py_ssize_t i + for i in range(num_points): + dst[i][0] = src[i][0] + dst[i][1] = src[i][1] + + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef inline Py_ssize_t add_point(float[:, :] arr, float[:] point, Py_ssize_t num_points): + # assert num_points < arr.shape[0] - 1 + # for j in range(dim): + arr[num_points][0] = point[0] + arr[num_points][1] = point[1] + num_points = num_points + 1 + return num_points + +@cython.boundscheck(False) +@cython.wraparound(False) +cdef Py_ssize_t computeIntersection_and_add(float[:] cp1, float[:] cp2, float[:] s, float[:] e, float[:, :] arr, Py_ssize_t num_points): + # dc_np = np.zeros(2, dtype=np.float32) + cdef float[2] dc + dc[0] = cp1[0] - cp2[0] + dc[1] = cp1[1] - cp2[1] + + # dp_np = np.zeros(2, dtype=np.float32) + cdef float[2] dp + dp[0] = s[0] - e[0] + dp[1] = s[1] - e[1] + + cdef float n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + cdef float n2 = s[0] * e[1] - s[1] * e[0] + cdef float n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + + arr[num_points][0] = (n1*dp[0] - n2*dc[0]) * n3 + arr[num_points][1] = (n1*dp[1] - n2*dc[1]) * n3 + num_points = num_points + 1 + + return num_points + +@cython.boundscheck(False) +@cython.wraparound(False) +def polygon_clip_float(float [:, :] subjectPolygon, float [:, :] clipPolygon): + """ + Assumes subjectPolygon and clipPolygon have 4 vertices + """ + cdef Py_ssize_t num_clip_points = clipPolygon.shape[0] + cp1 = clipPolygon[num_clip_points - 1] + + MAX_INTERSECT_POINTS = 10 + num_intersect_points = 0 + outputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) + cdef float[:, :] outputList = outputList_np + + inputList_np = np.zeros((MAX_INTERSECT_POINTS, 2), dtype=np.float32) + cdef float[:, :] inputList = inputList_np + + copy_points(subjectPolygon, outputList, subjectPolygon.shape[0]) + cdef Py_ssize_t noutput_list = subjectPolygon.shape[0] + cdef Py_ssize_t ninput_list = 0 + cdef Py_ssize_t iidx = 0 + + for cidx in range(num_clip_points): + clipVertex = clipPolygon[cidx] + cp2 = clipVertex + + copy_points(outputList, inputList, noutput_list) + ninput_list = noutput_list + noutput_list = 0 + + s = inputList[ninput_list - 1] + + for iidx in range(ninput_list): + e = inputList[iidx] + if inside(cp1, cp2, e): + if not inside(cp1, cp2, s): + noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + + noutput_list = add_point(outputList, e, noutput_list) + elif inside(cp1, cp2, s): + noutput_list = computeIntersection_and_add(cp1, cp2, s, e, outputList, noutput_list) + s = e + cp1 = cp2 + if noutput_list == 0: + break + return outputList_np, noutput_list + 
+ + +@cython.boundscheck(False) +@cython.wraparound(False) +def box_intersection(float [:, :, :, :] rect1, + float [:, :, :, :] rect2, + float [:, :, :] non_rot_inter_areas, + int[:] nums_k2, + float [:, :, :] inter_areas, + bint approximate): + """ + rect1 - B x K1 x 8 x 3 matrix of box corners + rect2 - B x K2 x 8 x 3 matrix of box corners + non_rot_inter_areas - intersection areas of boxes + """ + + cdef Py_ssize_t B = rect1.shape[0] + cdef Py_ssize_t K1 = rect1.shape[1] + cdef Py_ssize_t K2 = rect2.shape[2] + + + for b in range(B): + for k1 in range(K1): + for k2 in range(K2): + if k2 >= nums_k2[b]: + break + + if approximate and non_rot_inter_areas[b][k1][k2] == 0: + continue + + ##### compute volume of intersection + inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2]) + ninter = len(inter) + if ninter > 0: # there is some intersection between the boxes + xs = np.array([x[0] for x in inter]).astype(dtype=FLOAT) + ys = np.array([x[1] for x in inter]).astype(dtype=FLOAT) + inter_areas[b,k1,k2] = 0.5 * np.abs(np.dot(xs,np.roll(ys,1))-np.dot(ys,np.roll(xs,1))) diff --git a/models/LL3DA/utils/box_ops3d.py b/models/LL3DA/utils/box_ops3d.py new file mode 100644 index 0000000..fa003bf --- /dev/null +++ b/models/LL3DA/utils/box_ops3d.py @@ -0,0 +1,855 @@ +# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved +"""Utilities for bounding box manipulation and GIoU.""" +from typing import List + +import torch +from torchvision.ops.boxes import box_area + +try: + from box_intersection import batch_intersect +except ImportError: + print('Could not import cythonized batch_intersection') + batch_intersect = None + +import numpy as np +from scipy.spatial import ConvexHull + + +def polygon_clip(subjectPolygon, clipPolygon): + """Clip a polygon with another polygon. + + Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python + + Args: + subjectPolygon: a list of (x,y) 2d points, any polygon. + clipPolygon: a list of (x,y) 2d points, has to be *convex* + Note: + **points have to be counter-clockwise ordered** + + Return: + a list of (x,y) vertex point for the intersection polygon. 
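+
+    Example (illustrative; two overlapping 2 x 2 squares whose intersection
+    is the unit square [1, 2] x [1, 2]):
+
+        sub = [(0.0, 0.0), (2.0, 0.0), (2.0, 2.0), (0.0, 2.0)]
+        clip = [(1.0, 1.0), (3.0, 1.0), (3.0, 3.0), (1.0, 3.0)]
+        inter = polygon_clip(sub, clip)
+        poly_area(np.array(inter)[:, 0], np.array(inter)[:, 1])  # -> 1.0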
+ """ + + def inside(p): + return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - + cp1[1]) * (p[0] - cp1[0]) + + # diff_cp = cp2 - cp1 + # diff_p = p - cp1 + # diff_p = diff_p[[1, 0]] + # mult = diff_cp * diff_p + # return mult[0] > mult[1] + + def computeIntersection(): + dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]] + dp = [s[0] - e[0], s[1] - e[1]] + n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + n2 = s[0] * e[1] - s[1] * e[0] + n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3] + + # dc = cp1 - cp2 + # dp = s - e + # n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + # n2 = s[0] * e[1] - s[1] * e[0] + # n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + # return (n1 * dp - n2 * dc) * n3 + + outputList = subjectPolygon + cp1 = clipPolygon[-1] + + for clipVertex in clipPolygon: + cp2 = clipVertex + inputList = outputList + outputList = [] + s = inputList[-1] + + for subjectVertex in inputList: + e = subjectVertex + if inside(e): + if not inside(s): + outputList.append(computeIntersection()) + outputList.append(e) + elif inside(s): + outputList.append(computeIntersection()) + s = e + cp1 = cp2 + if len(outputList) == 0: + return None + return (outputList) + + +def helper_computeIntersection(cp1: torch.Tensor, cp2: torch.Tensor, + s: torch.Tensor, e: torch.Tensor): + dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]] + dp = [s[0] - e[0], s[1] - e[1]] + n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + n2 = s[0] * e[1] - s[1] * e[0] + n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + # return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3] + return torch.stack([(n1 * dp[0] - n2 * dc[0]) * n3, + (n1 * dp[1] - n2 * dc[1]) * n3]) + + +def helper_inside(cp1: torch.Tensor, cp2: torch.Tensor, p: torch.Tensor): + ineq = (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - + cp1[0]) + return ineq.item() + + +def polygon_clip_unnest(subjectPolygon: torch.Tensor, + clipPolygon: torch.Tensor): + """Clip a polygon with another polygon. + + Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python + + Args: + subjectPolygon: a list of (x,y) 2d points, any polygon. + clipPolygon: a list of (x,y) 2d points, has to be *convex* + Note: + **points have to be counter-clockwise ordered** + + Return: + a list of (x,y) vertex point for the intersection polygon. 
+ """ + outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])] + cp1 = clipPolygon[-1] + + for clipVertex in clipPolygon: + cp2 = clipVertex + inputList = outputList.copy() + outputList.clear() + s = inputList[-1] + + for subjectVertex in inputList: + e = subjectVertex + if helper_inside(cp1, cp2, e): + if not helper_inside(cp1, cp2, s): + outputList.append( + helper_computeIntersection(cp1, cp2, s, e)) + outputList.append(e) + elif helper_inside(cp1, cp2, s): + outputList.append(helper_computeIntersection(cp1, cp2, s, e)) + s = e + cp1 = cp2 + if len(outputList) == 0: + # return None + break + return outputList + + +def poly_area(x, y): + """ Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """ + return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + +def poly_area_tensor(x, y): + return 0.5 * torch.abs( + torch.dot(x, torch.roll(y, 1)) - torch.dot(y, torch.roll(x, 1))) + + +def box3d_vol_tensor(corners): + EPS = 1e-6 + reshape = False + B, K = corners.shape[0], corners.shape[1] + if len(corners.shape) == 4: + # batch x prop x 8 x 3 + reshape = True + corners = corners.view(-1, 8, 3) + a = torch.sqrt( + (corners[:, 0, :] - corners[:, 1, :]).pow(2).sum(dim=1).clamp(min=EPS)) + b = torch.sqrt( + (corners[:, 1, :] - corners[:, 2, :]).pow(2).sum(dim=1).clamp(min=EPS)) + c = torch.sqrt( + (corners[:, 0, :] - corners[:, 4, :]).pow(2).sum(dim=1).clamp(min=EPS)) + vols = a * b * c + if reshape: + vols = vols.view(B, K) + return vols + + +def convex_hull_intersection(p1, p2): + """Compute area of two convex hull's intersection area. + + p1,p2 are a list of (x,y) tuples of hull vertices. return a list of (x,y) + for the intersection and its volume + """ + inter_p = polygon_clip(p1, p2) + if inter_p is not None: + hull_inter = ConvexHull(inter_p) + return inter_p, hull_inter.volume + else: + return None, 0.0 + + +def box3d_vol(corners): + ''' corners: (8,3) no assumption on axis direction ''' + a = np.sqrt(np.sum((corners[0, :] - corners[1, :])**2)) + b = np.sqrt(np.sum((corners[1, :] - corners[2, :])**2)) + c = np.sqrt(np.sum((corners[0, :] - corners[4, :])**2)) + return a * b * c + + +def enclosing_box3d_vol(corners1, corners2): + """volume of enclosing axis-aligned box.""" + assert len(corners1.shape) == 4 + assert len(corners2.shape) == 4 + assert corners1.shape[0] == corners2.shape[0] + assert corners1.shape[2] == 8 + assert corners1.shape[3] == 3 + assert corners2.shape[2] == 8 + assert corners2.shape[3] == 3 + EPS = 1e-6 + + corners1 = corners1.clone() + corners2 = corners2.clone() + # flip Y axis, since it is negative + corners1[:, :, :, 1] *= -1 + corners2[:, :, :, 1] *= -1 + + # min_a = torch.min(corners1[:, :, 0, :][:, :, None, :] , corners2[:, :, 0, :][:, None, :, :]) + # max_a = torch.max(corners1[:, :, 1, :][:, :, None, :] , corners2[:, :, 1, :][:, None, :, :]) + # a = (max_a - min_a).pow(2).sum(dim=3).clamp(min=EPS).sqrt() + + # min_b = torch.min(corners1[:, :, 1, :][:, :, None, :] , corners2[:, :, 1, :][:, None, :, :]) + # max_b = torch.max(corners1[:, :, 2, :][:, :, None, :] , corners2[:, :, 2, :][:, None, :, :]) + # b = (max_b - min_b).pow(2).sum(dim=3).clamp(min=EPS).sqrt() + + # min_c = torch.min(corners1[:, :, 0, :][:, :, None, :] , corners2[:, :, 0, :][:, None, :, :]) + # max_c = torch.max(corners1[:, :, 4, :][:, :, None, :] , corners2[:, :, 4, :][:, None, :, :]) + # c = (max_c - min_c).pow(2).sum(dim=3).clamp(min=EPS).sqrt() + + # vol = a * b * c + + al_xmin = torch.min( + 
torch.min(corners1[:, :, :, 0], dim=2).values[:, :, None], + torch.min(corners2[:, :, :, 0], dim=2).values[:, None, :]) + al_ymin = torch.max( + torch.max(corners1[:, :, :, 1], dim=2).values[:, :, None], + torch.max(corners2[:, :, :, 1], dim=2).values[:, None, :]) + al_zmin = torch.min( + torch.min(corners1[:, :, :, 2], dim=2).values[:, :, None], + torch.min(corners2[:, :, :, 2], dim=2).values[:, None, :]) + al_xmax = torch.max( + torch.max(corners1[:, :, :, 0], dim=2).values[:, :, None], + torch.max(corners2[:, :, :, 0], dim=2).values[:, None, :]) + al_ymax = torch.min( + torch.min(corners1[:, :, :, 1], dim=2).values[:, :, None], + torch.min(corners2[:, :, :, 1], dim=2).values[:, None, :]) + al_zmax = torch.max( + torch.max(corners1[:, :, :, 2], dim=2).values[:, :, None], + torch.max(corners2[:, :, :, 2], dim=2).values[:, None, :]) + + diff_x = torch.abs(al_xmax - al_xmin) + diff_y = torch.abs(al_ymax - al_ymin) + diff_z = torch.abs(al_zmax - al_zmin) + vol = diff_x * diff_y * diff_z + return vol + + +def is_clockwise(p): + x = p[:, 0] + y = p[:, 1] + return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0 + + +def box3d_iou(corners1, corners2): + """Compute 3D bounding box IoU. + + Input: + corners1: numpy array (8,3), assume up direction is negative Y + corners2: numpy array (8,3), assume up direction is negative Y + Output: + iou: 3D bounding box IoU + iou_2d: bird's eye view 2D bounding box IoU + + todo (rqi): add more description on corner points' orders. + """ + rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)] + rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)] + inter, inter_area = convex_hull_intersection(rect1, rect2) + + # corner points are in counter clockwise order + # area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1]) + # area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1]) + + # iou_2d = inter_area/(area1+area2-inter_area) + + ymax = min(corners1[0, 1], corners2[0, 1]) + ymin = max(corners1[4, 1], corners2[4, 1]) + inter_vol = inter_area * max(0.0, ymax - ymin) + vol1 = box3d_vol(corners1) + vol2 = box3d_vol(corners2) + union = (vol1 + vol2 - inter_vol) + iou = inter_vol / union + return iou, union + + +@torch.jit.ignore +def to_list_1d(arr) -> List[float]: + arr = arr.detach().cpu().numpy().tolist() + return arr + + +@torch.jit.ignore +def to_list_3d(arr) -> List[List[List[float]]]: + arr = arr.detach().cpu().numpy().tolist() + return arr + + +def generalized_box3d_iou_tensor_non_diff(corners1: torch.Tensor, + corners2: torch.Tensor, + nums_k2: torch.Tensor, + rotated_boxes: bool = True, + return_inter_vols_only: bool = False, + approximate: bool = True): + if batch_intersect is None: + return generalized_box3d_iou_tensor_jit(corners1, corners2, nums_k2, + rotated_boxes, + return_inter_vols_only) + else: + assert len(corners1.shape) == 4 + assert len(corners2.shape) == 4 + assert corners1.shape[2] == 8 + assert corners1.shape[3] == 3 + assert corners1.shape[0] == corners2.shape[0] + assert corners1.shape[2] == corners2.shape[2] + assert corners1.shape[3] == corners2.shape[3] + + B, K1 = corners1.shape[0], corners1.shape[1] + _, K2 = corners2.shape[0], corners2.shape[1] + + # # box height. 
Y is negative, so max is torch.min + ymax = torch.min(corners1[:, :, 0, 1][:, :, None], + corners2[:, :, 0, 1][:, None, :]) + ymin = torch.max(corners1[:, :, 4, 1][:, :, None], + corners2[:, :, 4, 1][:, None, :]) + height = (ymax - ymin).clamp(min=0) + EPS = 1e-8 + + idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device) + idx2 = torch.tensor([0, 2], dtype=torch.int64, device=corners1.device) + rect1 = corners1[:, :, idx, :] + rect2 = corners2[:, :, idx, :] + rect1 = rect1[:, :, :, idx2] + rect2 = rect2[:, :, :, idx2] + + lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, + 1][:, None, :, :]) + rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, + 3][:, None, :, :]) + wh = (rb - lt).clamp(min=0) + non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1] + non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2) + if nums_k2 is not None: + for b in range(B): + non_rot_inter_areas[b, :, nums_k2[b]:] = 0 + + enclosing_vols = enclosing_box3d_vol(corners1, corners2) + + # vols of boxes + vols1 = box3d_vol_tensor(corners1).clamp(min=EPS) + vols2 = box3d_vol_tensor(corners2).clamp(min=EPS) + + sum_vols = vols1[:, :, None] + vols2[:, None, :] + + # filter malformed boxes + good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS) + if rotated_boxes: + inter_areas = np.zeros((B, K1, K2), dtype=np.float32) + rect1 = rect1.cpu().detach().numpy() + rect2 = rect2.cpu().detach().numpy() + nums_k2_np = nums_k2.cpu().numpy() + non_rot_inter_areas_np = non_rot_inter_areas.cpu().detach().numpy() + batch_intersect(rect1, rect2, non_rot_inter_areas_np, nums_k2_np, + inter_areas, approximate) + inter_areas = torch.from_numpy(inter_areas) + else: + inter_areas = non_rot_inter_areas + + inter_areas = inter_areas.to(corners1.device) + ### gIOU = iou - (1 - sum_vols/enclose_vol) + inter_vols = inter_areas * height + if return_inter_vols_only: + return inter_vols + + union_vols = (sum_vols - inter_vols).clamp(min=EPS) + ious = inter_vols / union_vols + giou_second_term = -(1 - union_vols / enclosing_vols) + gious = ious + giou_second_term + gious *= good_boxes + if nums_k2 is not None: + mask = torch.zeros((B, K1, K2), + device=height.device, + dtype=torch.float32) + for b in range(B): + mask[b, :, :nums_k2[b]] = 1 + gious *= mask + return gious + + +def generalized_box3d_iou_tensor(corners1: torch.Tensor, + corners2: torch.Tensor, + nums_k2: torch.Tensor, + rotated_boxes: bool = True, + return_inter_vols_only: bool = False, + no_grad: bool = False): + """ + Input: + corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y + corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y + Assumes that the box is only rotated along Z direction + Returns: + B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned + The return IOU is differentiable + """ + assert len(corners1.shape) == 4 + assert len(corners2.shape) == 4 + assert corners1.shape[2] == 8 + assert corners1.shape[3] == 3 + assert corners1.shape[0] == corners2.shape[0] + assert corners1.shape[2] == corners2.shape[2] + assert corners1.shape[3] == corners2.shape[3] + + B, K1 = corners1.shape[0], corners1.shape[1] + _, K2 = corners2.shape[0], corners2.shape[1] + + # # box height. 
Y is negative, so max is torch.min + ymax = torch.min(corners1[:, :, 0, 1][:, :, None], corners2[:, :, 0, + 1][:, None, :]) + ymin = torch.max(corners1[:, :, 4, 1][:, :, None], corners2[:, :, 4, + 1][:, None, :]) + height = (ymax - ymin).clamp(min=0) + EPS = 1e-8 + + idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device) + idx2 = torch.tensor([0, 2], dtype=torch.int64, device=corners1.device) + rect1 = corners1[:, :, idx, :] + rect2 = corners2[:, :, idx, :] + rect1 = rect1[:, :, :, idx2] + rect2 = rect2[:, :, :, idx2] + + lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, + None, :, :]) + rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, + None, :, :]) + wh = (rb - lt).clamp(min=0) + non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1] + non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2) + if nums_k2 is not None: + for b in range(B): + non_rot_inter_areas[b, :, nums_k2[b]:] = 0 + + enclosing_vols = enclosing_box3d_vol(corners1, corners2) + + # vols of boxes + vols1 = box3d_vol_tensor(corners1).clamp(min=EPS) + vols2 = box3d_vol_tensor(corners2).clamp(min=EPS) + + sum_vols = vols1[:, :, None] + vols2[:, None, :] + + # filter malformed boxes + good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS) + + if rotated_boxes: + inter_areas = torch.zeros((B, K1, K2), dtype=torch.float32) + rect1 = rect1.cpu() + rect2 = rect2.cpu() + nums_k2_np = to_list_1d(nums_k2) + non_rot_inter_areas_np = to_list_3d(non_rot_inter_areas) + for b in range(B): + for k1 in range(K1): + for k2 in range(K2): + if nums_k2 is not None and k2 >= nums_k2_np[b]: + break + if non_rot_inter_areas_np[b][k1][k2] == 0: + continue + ##### compute volume of intersection + # inter = polygon_clip(rect1[b, k1], rect2[b, k2]) + inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2]) + # if inter is None: + # if len(inter) == 0: + # # area = torch.zeros(1, dtype=torch.float32, device=inter_areas.device).squeeze() + # # area = 0 + # continue + # else: + + if len(inter) > 0: + # inter = torch.stack(inter) + # xs = inter[:, 0] + # ys = inter[:, 1] + xs = torch.stack([x[0] for x in inter]) + ys = torch.stack([x[1] for x in inter]) + # area = poly_area_tensor(xs, ys) + inter_areas[b, k1, k2] = torch.abs( + torch.dot(xs, torch.roll(ys, 1)) - + torch.dot(ys, torch.roll(xs, 1))) + inter_areas.mul_(0.5) + else: + inter_areas = non_rot_inter_areas + + inter_areas = inter_areas.to(corners1.device) + ### gIOU = iou - (1 - sum_vols/enclose_vol) + inter_vols = inter_areas * height + if return_inter_vols_only: + return inter_vols + + union_vols = (sum_vols - inter_vols).clamp(min=EPS) + ious = inter_vols / union_vols + giou_second_term = -(1 - union_vols / enclosing_vols) + gious = ious + giou_second_term + gious *= good_boxes + if nums_k2 is not None: + mask = torch.zeros((B, K1, K2), + device=height.device, + dtype=torch.float32) + for b in range(B): + mask[b, :, :nums_k2[b]] = 1 + gious *= mask + return gious + + +generalized_box3d_iou_tensor_jit = torch.jit.script( + generalized_box3d_iou_tensor) + + +def enclosing_box3d_convex_hull(corners1, + corners2, + nums_k2, + mask, + enclosing_vols=None): + B, K1 = corners1.shape[0], corners1.shape[1] + _, K2 = corners2.shape[0], corners2.shape[1] + if enclosing_vols is None: + enclosing_vols = np.zeros((B, K1, K2)).astype(np.float32) + for b in range(B): + for k1 in range(K1): + for k2 in range(K2): + if nums_k2 is not None and k2 >= nums_k2[b]: + break + if mask is not None and mask[b, k1, k2] <= 0: + continue + + hull = 
ConvexHull(np.vstack([corners1[b, k1], corners2[b, + k2]])) + enclosing_vols[b, k1, k2] = hull.volume + return enclosing_vols + + +enclosing_box3d_convex_hull_numba = autojit(enclosing_box3d_convex_hull) +# enclosing_box3d_convex_hull_numba = enclosing_box3d_convex_hull + + +def generalized_box3d_iou_convex_hull_nondiff_tensor( + corners1: torch.Tensor, + corners2: torch.Tensor, + nums_k2: torch.Tensor, + rotated_boxes: bool = True): + """ + Input: + corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y + corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y + Assumes that the box is only rotated along Z direction + Returns: + B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned + The return IOU is differentiable + """ + assert len(corners1.shape) == 4 + assert len(corners2.shape) == 4 + assert corners1.shape[2] == 8 + assert corners1.shape[3] == 3 + assert corners1.shape[0] == corners2.shape[0] + assert corners1.shape[2] == corners2.shape[2] + assert corners1.shape[3] == corners2.shape[3] + + B, K1 = corners1.shape[0], corners1.shape[1] + _, K2 = corners2.shape[0], corners2.shape[1] + EPS = 1e-8 + + # vols of boxes + vols1 = box3d_vol_tensor(corners1).clamp(min=EPS) + vols2 = box3d_vol_tensor(corners2).clamp(min=EPS) + + sum_vols = vols1[:, :, None] + vols2[:, None, :] + + inter_vols = generalized_box3d_iou_tensor_jit(corners1, + corners2, + nums_k2, + rotated_boxes, + return_inter_vols_only=True) + + enclosing_vols = enclosing_box3d_vol(corners1, corners2) + + if rotated_boxes: + corners1_np = corners1.detach().cpu().numpy() + corners2_np = corners2.detach().cpu().numpy() + mask = inter_vols.detach().cpu().numpy() + nums_k2 = nums_k2.cpu().numpy() + enclosing_vols_np = enclosing_vols.detach().cpu().numpy() + enclosing_vols = enclosing_box3d_convex_hull_numba( + corners1_np, corners2_np, nums_k2, mask, enclosing_vols_np) + enclosing_vols = torch.from_numpy(enclosing_vols).to(corners1.device) + + union_vols = (sum_vols - inter_vols).clamp(min=EPS) + ious = inter_vols / union_vols + giou_second_term = -(1 - union_vols / enclosing_vols) + gious = ious + giou_second_term + good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS) + gious *= good_boxes + if nums_k2 is not None: + mask = torch.zeros((B, K1, K2), + device=corners1.device, + dtype=torch.float32) + for b in range(B): + mask[b, :, :nums_k2[b]] = 1 + gious *= mask + return gious + + +def generalized_box3d_iou(corners1, corners2, nums_k2=None): + """ + Input: + corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y + corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y + mask: + Returns: + B x K1 x K2 matrix of generalized IOU + """ + # GenIOU = IOU - (C - sum_of_vols)/ C + # where C = vol of convex_hull containing all points + + # degenerate boxes gives inf / nan results + # so do an early check + #TODO: + assert corners1.ndim == 4 + assert corners2.ndim == 4 + assert corners1.shape[0] == corners2.shape[0] + B, K1, _, _ = corners1.shape + _, K2, _, _ = corners2.shape + + gious = torch.zeros((B, K1, K2), dtype=torch.float32) + + corners1_np = corners1.detach().cpu().numpy() + corners2_np = corners2.detach().cpu().numpy() + + for b in range(B): + for i in range(K1): + for j in range(K2): + if nums_k2 is not None and j >= nums_k2[b]: + break + iou, sum_of_vols = box3d_iou(corners1_np[b, i], corners2_np[b, + j]) + + hull = ConvexHull( + np.vstack([corners1_np[b, i], corners2_np[b, j]])) + C = hull.volume + + giou = iou - (C - 
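+                # gIoU = IoU - (C - vol(A U B)) / C, where C is the volume of
+                # the convex hull containing both boxes; note sum_of_vols is
+                # the union volume returned by box3d_iou above.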
sum_of_vols) / C + gious[b, i, j] = giou + return gious + + +# ----------------------------------------------------------- +# Convert from box parameters to +# ----------------------------------------------------------- +def roty(t): + """Rotation about the y-axis.""" + c = np.cos(t) + s = np.sin(t) + return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]) + + +def roty_batch(t): + """Rotation about the y-axis. + + t: (x1,x2,...xn) + return: (x1,x2,...,xn,3,3) + """ + input_shape = t.shape + output = np.zeros(tuple(list(input_shape) + [3, 3])) + c = np.cos(t) + s = np.sin(t) + output[..., 0, 0] = c + output[..., 0, 2] = s + output[..., 1, 1] = 1 + output[..., 2, 0] = -s + output[..., 2, 2] = c + return output + + +def get_3d_box(box_size, heading_angle, center): + """box_size is array(l,w,h), heading_angle is radius clockwise from pos x + axis, center is xyz of box center output (8,3) array for 3D box cornders + Similar to utils/compute_orientation_3d.""" + R = roty(heading_angle) + l, w, h = box_size + x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] + y_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2] + z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2] + corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners])) + corners_3d[0, :] = corners_3d[0, :] + center[0] + corners_3d[1, :] = corners_3d[1, :] + center[1] + corners_3d[2, :] = corners_3d[2, :] + center[2] + corners_3d = np.transpose(corners_3d) + return corners_3d + + +def get_3d_box_batch(box_size, heading_angle, center): + ''' box_size: [x1,x2,...,xn,3] -- box dimensions without flipping [X, Y, Z] -- l, w, h + heading_angle: [x1,x2,...,xn] -- theta in radians + center: [x1,x2,...,xn,3] -- center point has been flipped to camera axis [X, -Z, Y] + Return: + [x1,x3,...,xn,8,3] + ''' + input_shape = heading_angle.shape + R = roty_batch(heading_angle) + l = np.expand_dims(box_size[..., 0], -1) # [x1,...,xn,1] + w = np.expand_dims(box_size[..., 1], -1) + h = np.expand_dims(box_size[..., 2], -1) + corners_3d = np.zeros(tuple(list(input_shape) + [8, 3])) + corners_3d[..., :, 0] = np.concatenate( + (l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1) + corners_3d[..., :, 1] = np.concatenate( + (h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1) + corners_3d[..., :, 2] = np.concatenate( + (w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1) + tlist = [i for i in range(len(input_shape))] + tlist += [len(input_shape) + 1, len(input_shape)] + corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist))) + corners_3d += np.expand_dims(center, -2) + return corners_3d + + +def roty_batch_tensor(t): + input_shape = t.shape + output = torch.zeros(tuple(list(input_shape) + [3, 3]), + dtype=torch.float32, + device=t.device) + c = torch.cos(t) + s = torch.sin(t) + output[..., 0, 0] = c + output[..., 0, 2] = s + output[..., 1, 1] = 1 + output[..., 2, 0] = -s + output[..., 2, 2] = c + return output + + +def flip_axis_to_camera_tensor(pc): + """Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward Input and output + are both (N,3) array.""" + pc2 = torch.clone(pc) + pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # cam X,Y,Z = depth X,-Z,Y + pc2[..., 1] *= -1 + return pc2 + + +def get_3d_box_batch_tensor(box_size, heading_angle, center): + assert isinstance(box_size, torch.Tensor) + assert isinstance(heading_angle, torch.Tensor) + assert isinstance(center, torch.Tensor) + + reshape_final = False + if heading_angle.ndim == 2: + assert 
box_size.ndim == 3 + assert center.ndim == 3 + bsize = box_size.shape[0] + nprop = box_size.shape[1] + box_size = box_size.view(-1, box_size.shape[-1]) + heading_angle = heading_angle.view(-1) + center = center.reshape(-1, 3) + reshape_final = True + + input_shape = heading_angle.shape + R = roty_batch_tensor(heading_angle) + l = torch.unsqueeze(box_size[..., 0], -1) # [x1,...,xn,1] + w = torch.unsqueeze(box_size[..., 1], -1) + h = torch.unsqueeze(box_size[..., 2], -1) + corners_3d = torch.zeros(tuple(list(input_shape) + [8, 3]), + device=box_size.device, + dtype=torch.float32) + corners_3d[..., :, 0] = torch.cat( + (l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1) + corners_3d[..., :, 1] = torch.cat( + (h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1) + corners_3d[..., :, 2] = torch.cat( + (w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1) + tlist = [i for i in range(len(input_shape))] + tlist += [len(input_shape) + 1, len(input_shape)] + corners_3d = torch.matmul(corners_3d, R.permute(tlist)) + corners_3d += torch.unsqueeze(center, -2) + if reshape_final: + corners_3d = corners_3d.reshape(bsize, nprop, 8, 3) + return corners_3d + + +def box_cxcywh_to_xyxy(x): + x_c, y_c, w, h = x.unbind(-1) + b = [(x_c - 0.5 * w), (y_c - 0.5 * h), (x_c + 0.5 * w), (y_c + 0.5 * h)] + return torch.stack(b, dim=-1) + + +def box_xyxy_to_cxcywh(x): + x0, y0, x1, y1 = x.unbind(-1) + b = [(x0 + x1) / 2, (y0 + y1) / 2, (x1 - x0), (y1 - y0)] + return torch.stack(b, dim=-1) + + +if __name__ == '__main__': + + # Function for polygon ploting + import matplotlib + import matplotlib.pyplot as plt + from matplotlib.collections import PatchCollection + from matplotlib.patches import Polygon + + def plot_polys(plist, scale=500.0): + fig, ax = plt.subplots() + patches = [] + for p in plist: + poly = Polygon(np.array(p) / scale, True) + patches.append(poly) + + pc = PatchCollection(patches, cmap=matplotlib.cm.jet, alpha=0.5) + colors = 100 * np.random.rand(len(patches)) + pc.set_array(np.array(colors)) + ax.add_collection(pc) + plt.show() + + # Demo on ConvexHull + points = np.random.rand(30, 2) # 30 random points in 2-D + hull = ConvexHull(points) + # **In 2D "volume" is is area, "area" is perimeter + print(('Hull area: ', hull.volume)) + for simplex in hull.simplices: + print(simplex) + + # Demo on convex hull overlaps + sub_poly = [(0, 0), (300, 0), (300, 300), (0, 300)] + clip_poly = [(150, 150), (300, 300), (150, 450), (0, 300)] + inter_poly = polygon_clip(sub_poly, clip_poly) + print(poly_area(np.array(inter_poly)[:, 0], np.array(inter_poly)[:, 1])) + + # Test convex hull interaction function + rect1 = [(50, 0), (50, 300), (300, 300), (300, 0)] + rect2 = [(150, 150), (300, 300), (150, 450), (0, 300)] + plot_polys([rect1, rect2]) + inter, area = convex_hull_intersection(rect1, rect2) + print((inter, area)) + if inter is not None: + print(poly_area(np.array(inter)[:, 0], np.array(inter)[:, 1])) + + print('------------------') + rect1 = [(0.30026005199835404, 8.9408694211408424), \ + (-1.1571105364358421, 9.4686676477075533), \ + (0.1777082043006144, 13.154404877812102), \ + (1.6350787927348105, 12.626606651245391)] + rect1 = [rect1[0], rect1[3], rect1[2], rect1[1]] + rect2 = [(0.23908745901608636, 8.8551095691132886), \ + (-1.2771419487733995, 9.4269062966181956), \ + (0.13138836963152717, 13.161896351296868), \ + (1.647617777421013, 12.590099623791961)] + rect2 = [rect2[0], rect2[3], rect2[2], rect2[1]] + plot_polys([rect1, rect2]) + inter, area = 
convex_hull_intersection(rect1, rect2) + print((inter, area)) diff --git a/models/LL3DA/utils/box_util.py b/models/LL3DA/utils/box_util.py new file mode 100644 index 0000000..23b1ef6 --- /dev/null +++ b/models/LL3DA/utils/box_util.py @@ -0,0 +1,799 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +"""Helper functions for calculating 2D and 3D bounding box IoU. + +Collected and written by Charles R. Qi Last modified: Apr 2021 by Ishan Misra +""" +import numpy as np +import torch +from scipy.spatial import ConvexHull, Delaunay +from utils.misc import to_list_1d, to_list_3d + +try: + from utils.box_intersection import box_intersection +except ImportError: + print( + 'Could not import cythonized box intersection. Consider compiling box_intersection.pyx for faster training.' + ) + box_intersection = None + + +def in_hull(p, hull): + if not isinstance(hull, Delaunay): + hull = Delaunay(hull) + return hull.find_simplex(p) >= 0 + + +def extract_pc_in_box3d(pc, box3d): + """pc: (N,3), box3d: (8,3)""" + box3d_roi_inds = in_hull(pc[:, 0:3], box3d) + return pc[box3d_roi_inds, :], box3d_roi_inds + + +def polygon_clip(subjectPolygon, clipPolygon): + """Clip a polygon with another polygon. + + Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python + + Args: + subjectPolygon: a list of (x,y) 2d points, any polygon. + clipPolygon: a list of (x,y) 2d points, has to be *convex* + Note: + **points have to be counter-clockwise ordered** + + Return: + a list of (x,y) vertex point for the intersection polygon. + """ + + def inside(p): + return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - + cp1[1]) * (p[0] - cp1[0]) + + def computeIntersection(): + dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]] + dp = [s[0] - e[0], s[1] - e[1]] + n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + n2 = s[0] * e[1] - s[1] * e[0] + n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3] + + outputList = subjectPolygon + cp1 = clipPolygon[-1] + + for clipVertex in clipPolygon: + cp2 = clipVertex + inputList = outputList + outputList = [] + s = inputList[-1] + + for subjectVertex in inputList: + e = subjectVertex + if inside(e): + if not inside(s): + outputList.append(computeIntersection()) + outputList.append(e) + elif inside(s): + outputList.append(computeIntersection()) + s = e + cp1 = cp2 + if len(outputList) == 0: + return None + return outputList + + +def poly_area(x, y): + """Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates""" + return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1))) + + +def convex_hull_intersection(p1, p2): + """Compute area of two convex hull's intersection area. + + p1,p2 are a list of (x,y) tuples of hull vertices. return a list of (x,y) + for the intersection and its volume + """ + inter_p = polygon_clip(p1, p2) + if inter_p is not None: + try: # for safety issue + hull_inter = ConvexHull(inter_p) + return inter_p, hull_inter.volume + except: + return None, 0.0 + else: + return None, 0.0 + + +def box3d_vol(corners): + """corners: (8,3) no assumption on axis direction""" + a = np.sqrt(np.sum((corners[0, :] - corners[1, :])**2)) + b = np.sqrt(np.sum((corners[1, :] - corners[2, :])**2)) + c = np.sqrt(np.sum((corners[0, :] - corners[4, :])**2)) + return a * b * c + + +def is_clockwise(p): + x = p[:, 0] + y = p[:, 1] + return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0 + + +def box3d_iou(corners1, corners2): + """Compute 3D bounding box IoU. 
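+
+    Illustrative example (an exposition aid, not an original doctest): two
+    2 x 2 x 2 cubes, the second shifted by (1, 0, 0.5), overlap in a
+    1 x 2 x 1.5 region, so both returned IoUs equal 3 / 13 (about 0.23):
+
+        a = get_3d_box((2, 2, 2), 0, (0, 0, 0))
+        b = get_3d_box((2, 2, 2), 0, (1, 0, 0.5))
+        iou, iou_2d = box3d_iou(a, b)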
+ + Input: + corners1: numpy array (8,3), assume up direction is negative Y + corners2: numpy array (8,3), assume up direction is negative Y + Output: + iou: 3D bounding box IoU + iou_2d: bird's eye view 2D bounding box IoU + + todo (rqi): add more description on corner points' orders. + """ + # corner points are in counter clockwise order + rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)] + rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)] + area1 = poly_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1]) + area2 = poly_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1]) + inter, inter_area = convex_hull_intersection(rect1, rect2) + iou_2d = inter_area / (area1 + area2 - inter_area) + ymax = min(corners1[0, 1], corners2[0, 1]) + ymin = max(corners1[4, 1], corners2[4, 1]) + inter_vol = inter_area * max(0.0, ymax - ymin) + vol1 = box3d_vol(corners1) + vol2 = box3d_vol(corners2) + iou = inter_vol / (vol1 + vol2 - inter_vol) + return iou, iou_2d + + +def get_iou(bb1, bb2): + """Calculate the Intersection over Union (IoU) of two 2D bounding boxes. + + Parameters + ---------- + bb1 : dict + Keys: {'x1', 'x2', 'y1', 'y2'} + The (x1, y1) position is at the top left corner, + the (x2, y2) position is at the bottom right corner + bb2 : dict + Keys: {'x1', 'x2', 'y1', 'y2'} + The (x, y) position is at the top left corner, + the (x2, y2) position is at the bottom right corner + + Returns + ------- + float + in [0, 1] + """ + assert bb1['x1'] < bb1['x2'] + assert bb1['y1'] < bb1['y2'] + assert bb2['x1'] < bb2['x2'] + assert bb2['y1'] < bb2['y2'] + + # determine the coordinates of the intersection rectangle + x_left = max(bb1['x1'], bb2['x1']) + y_top = max(bb1['y1'], bb2['y1']) + x_right = min(bb1['x2'], bb2['x2']) + y_bottom = min(bb1['y2'], bb2['y2']) + + if x_right < x_left or y_bottom < y_top: + return 0.0 + + # The intersection of two axis-aligned bounding boxes is always an + # axis-aligned bounding box + intersection_area = (x_right - x_left) * (y_bottom - y_top) + + # compute the area of both AABBs + bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1']) + bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1']) + + # compute the intersection over union by taking the intersection + # area and dividing it by the sum of prediction + ground-truth + # areas - the interesection area + iou = intersection_area / float(bb1_area + bb2_area - intersection_area) + assert iou >= 0.0 + assert iou <= 1.0 + return iou + + +def box2d_iou(box1, box2): + """Compute 2D bounding box IoU. + + Input: + box1: tuple of (xmin,ymin,xmax,ymax) + box2: tuple of (xmin,ymin,xmax,ymax) + Output: + iou: 2D IoU scalar + """ + return get_iou( + { + 'x1': box1[0], + 'y1': box1[1], + 'x2': box1[2], + 'y2': box1[3] + }, + { + 'x1': box2[0], + 'y1': box2[1], + 'x2': box2[2], + 'y2': box2[3] + }, + ) + + +# ----------------------------------------------------------- +# Convert from box parameters to +# ----------------------------------------------------------- +def roty(t): + """Rotation about the y-axis.""" + c = np.cos(t) + s = np.sin(t) + return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]]) + + +def roty_batch(t): + """Rotation about the y-axis. 
+ + t: (x1,x2,...xn) + return: (x1,x2,...,xn,3,3) + """ + input_shape = t.shape + output = np.zeros(tuple(list(input_shape) + [3, 3])) + c = np.cos(t) + s = np.sin(t) + output[..., 0, 0] = c + output[..., 0, 2] = s + output[..., 1, 1] = 1 + output[..., 2, 0] = -s + output[..., 2, 2] = c + return output + + +def get_3d_box(box_size, heading_angle, center): + """box_size is array(l,w,h), heading_angle is the heading in radians + (clockwise from the positive x axis), center is the xyz of the box center; + outputs an (8,3) array of the 3D box corners. + Similar to utils/compute_orientation_3d.""" + R = roty(heading_angle) + l, w, h = box_size + x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2] + y_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2] + z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2] + corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners])) + corners_3d[0, :] = corners_3d[0, :] + center[0] + corners_3d[1, :] = corners_3d[1, :] + center[1] + corners_3d[2, :] = corners_3d[2, :] + center[2] + corners_3d = np.transpose(corners_3d) + return corners_3d + + +def flip_axis_to_camera_np(pc): + """Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward. Input and + output are both (N,3) arrays.""" + pc2 = pc.copy() + pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # cam X,Y,Z = depth X,-Z,Y + pc2[..., 1] *= -1 + return pc2 + + +def get_3d_box_batch_np(box_size, angle, center): + input_shape = angle.shape + R = roty_batch(angle) + l = np.expand_dims(box_size[..., 0], -1) # [x1,...,xn,1] + w = np.expand_dims(box_size[..., 1], -1) + h = np.expand_dims(box_size[..., 2], -1) + corners_3d = np.zeros(tuple(list(input_shape) + [8, 3])) + corners_3d[..., :, 0] = np.concatenate( + (l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1) + corners_3d[..., :, 1] = np.concatenate( + (h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1) + corners_3d[..., :, 2] = np.concatenate( + (w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1) + tlist = [i for i in range(len(input_shape))] + tlist += [len(input_shape) + 1, len(input_shape)] + corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist))) + corners_3d += np.expand_dims(center, -2) + return corners_3d + + +def flip_axis_to_camera_tensor(pc): + """Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward. Input and + output are both (N,3) arrays.""" + pc2 = torch.clone(pc) + pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # cam X,Y,Z = depth X,-Z,Y + pc2[..., 1] *= -1 + return pc2 + + +def roty_batch_tensor(t): + input_shape = t.shape + output = torch.zeros(tuple(list(input_shape) + [3, 3]), + dtype=torch.float32, + device=t.device) + c = torch.cos(t) + s = torch.sin(t) + output[..., 0, 0] = c + output[..., 0, 2] = s + output[..., 1, 1] = 1 + output[..., 2, 0] = -s + output[..., 2, 2] = c + return output + + +def get_3d_box_batch_tensor(box_size, angle, center): + assert isinstance(box_size, torch.Tensor) + assert isinstance(angle, torch.Tensor) + assert isinstance(center, torch.Tensor) + + reshape_final = False + if angle.ndim == 2: + assert box_size.ndim == 3 + assert center.ndim == 3 + bsize = box_size.shape[0] + nprop = box_size.shape[1] + box_size = box_size.reshape(-1, box_size.shape[-1]) + angle = angle.reshape(-1) + center = center.reshape(-1, 3) + reshape_final = True + + input_shape = angle.shape + R = roty_batch_tensor(angle) + l = torch.unsqueeze(box_size[..., 0], -1) # [x1,...,xn,1] + w = torch.unsqueeze(box_size[..., 1], -1) + h = torch.unsqueeze(box_size[..., 2], -1) + corners_3d =
torch.zeros(tuple(list(input_shape) + [8, 3]), + device=box_size.device, + dtype=torch.float32) + corners_3d[..., :, 0] = torch.cat( + (l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1) + corners_3d[..., :, 1] = torch.cat( + (h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1) + corners_3d[..., :, 2] = torch.cat( + (w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1) + tlist = [i for i in range(len(input_shape))] + tlist += [len(input_shape) + 1, len(input_shape)] + corners_3d = torch.matmul(corners_3d, R.permute(tlist)) + corners_3d += torch.unsqueeze(center, -2) + if reshape_final: + corners_3d = corners_3d.reshape(bsize, nprop, 8, 3) + return corners_3d + + +def get_3d_box_batch(box_size, angle, center): + """box_size: [x1,x2,...,xn,3] + angle: [x1,x2,...,xn] + center: [x1,x2,...,xn,3] + Return: + [x1,x3,...,xn,8,3] + """ + input_shape = angle.shape + R = roty_batch(angle) + l = np.expand_dims(box_size[..., 0], -1) # [x1,...,xn,1] + w = np.expand_dims(box_size[..., 1], -1) + h = np.expand_dims(box_size[..., 2], -1) + corners_3d = np.zeros(tuple(list(input_shape) + [8, 3])) + corners_3d[..., :, 0] = np.concatenate( + (l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1) + corners_3d[..., :, 1] = np.concatenate( + (h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1) + corners_3d[..., :, 2] = np.concatenate( + (w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1) + tlist = [i for i in range(len(input_shape))] + tlist += [len(input_shape) + 1, len(input_shape)] + corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist))) + corners_3d += np.expand_dims(center, -2) + return corners_3d + + +####### GIoU related operations. Differentiable ############# + + +def helper_computeIntersection(cp1: torch.Tensor, cp2: torch.Tensor, + s: torch.Tensor, e: torch.Tensor): + dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]] + dp = [s[0] - e[0], s[1] - e[1]] + n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0] + n2 = s[0] * e[1] - s[1] * e[0] + n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0]) + # return [(n1*dp[0] - n2*dc[0]) * n3, (n1*dp[1] - n2*dc[1]) * n3] + return torch.stack([(n1 * dp[0] - n2 * dc[0]) * n3, + (n1 * dp[1] - n2 * dc[1]) * n3]) + + +def helper_inside(cp1: torch.Tensor, cp2: torch.Tensor, p: torch.Tensor): + ineq = (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] - cp1[1]) * (p[0] - + cp1[0]) + return ineq.item() + + +def polygon_clip_unnest(subjectPolygon: torch.Tensor, + clipPolygon: torch.Tensor): + """Clip a polygon with another polygon. + + Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python + + Args: + subjectPolygon: a list of (x,y) 2d points, any polygon. + clipPolygon: a list of (x,y) 2d points, has to be *convex* + Note: + **points have to be counter-clockwise ordered** + + Return: + a list of (x,y) vertex point for the intersection polygon. 
+ """ + outputList = [subjectPolygon[x] for x in range(subjectPolygon.shape[0])] + cp1 = clipPolygon[-1] + + for clipVertex in clipPolygon: + cp2 = clipVertex + inputList = outputList.copy() + outputList.clear() + s = inputList[-1] + + for subjectVertex in inputList: + e = subjectVertex + if helper_inside(cp1, cp2, e): + if not helper_inside(cp1, cp2, s): + outputList.append( + helper_computeIntersection(cp1, cp2, s, e)) + outputList.append(e) + elif helper_inside(cp1, cp2, s): + outputList.append(helper_computeIntersection(cp1, cp2, s, e)) + s = e + cp1 = cp2 + if len(outputList) == 0: + # return None + break + return outputList + + +def box3d_vol_tensor(corners): + EPS = 1e-6 + reshape = False + B, K = corners.shape[0], corners.shape[1] + if len(corners.shape) == 4: + # batch x prop x 8 x 3 + reshape = True + corners = corners.view(-1, 8, 3) + a = torch.sqrt( + (corners[:, 0, :] - corners[:, 1, :]).pow(2).sum(dim=1).clamp(min=EPS)) + b = torch.sqrt( + (corners[:, 1, :] - corners[:, 2, :]).pow(2).sum(dim=1).clamp(min=EPS)) + c = torch.sqrt( + (corners[:, 0, :] - corners[:, 4, :]).pow(2).sum(dim=1).clamp(min=EPS)) + vols = a * b * c + if reshape: + vols = vols.view(B, K) + return vols + + +def enclosing_box3d_vol(corners1, corners2): + """volume of enclosing axis-aligned box.""" + assert len(corners1.shape) == 4 + assert len(corners2.shape) == 4 + assert corners1.shape[0] == corners2.shape[0] + assert corners1.shape[2] == 8 + assert corners1.shape[3] == 3 + assert corners2.shape[2] == 8 + assert corners2.shape[3] == 3 + EPS = 1e-6 + + corners1 = corners1.clone() + corners2 = corners2.clone() + # flip Y axis, since it is negative + corners1[:, :, :, 1] *= -1 + corners2[:, :, :, 1] *= -1 + + al_xmin = torch.min( + torch.min(corners1[:, :, :, 0], dim=2).values[:, :, None], + torch.min(corners2[:, :, :, 0], dim=2).values[:, None, :], + ) + al_ymin = torch.max( + torch.max(corners1[:, :, :, 1], dim=2).values[:, :, None], + torch.max(corners2[:, :, :, 1], dim=2).values[:, None, :], + ) + al_zmin = torch.min( + torch.min(corners1[:, :, :, 2], dim=2).values[:, :, None], + torch.min(corners2[:, :, :, 2], dim=2).values[:, None, :], + ) + al_xmax = torch.max( + torch.max(corners1[:, :, :, 0], dim=2).values[:, :, None], + torch.max(corners2[:, :, :, 0], dim=2).values[:, None, :], + ) + al_ymax = torch.min( + torch.min(corners1[:, :, :, 1], dim=2).values[:, :, None], + torch.min(corners2[:, :, :, 1], dim=2).values[:, None, :], + ) + al_zmax = torch.max( + torch.max(corners1[:, :, :, 2], dim=2).values[:, :, None], + torch.max(corners2[:, :, :, 2], dim=2).values[:, None, :], + ) + + diff_x = torch.abs(al_xmax - al_xmin) + diff_y = torch.abs(al_ymax - al_ymin) + diff_z = torch.abs(al_zmax - al_zmin) + vol = diff_x * diff_y * diff_z + return vol + + +def generalized_box3d_iou_tensor( + corners1: torch.Tensor, + corners2: torch.Tensor, + nums_k2: torch.Tensor, + rotated_boxes: bool = True, + return_inter_vols_only: bool = False, +): + """ + Input: + corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y + corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y + Assumes that the box is only rotated along Z direction + Returns: + B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned + """ + assert len(corners1.shape) == 4 + assert len(corners2.shape) == 4 + assert corners1.shape[2] == 8 + assert corners1.shape[3] == 3 + assert corners1.shape[0] == corners2.shape[0] + assert corners1.shape[2] == corners2.shape[2] + assert corners1.shape[3] == 
corners2.shape[3] + + B, K1 = corners1.shape[0], corners1.shape[1] + _, K2 = corners2.shape[0], corners2.shape[1] + + # # box height. Y is negative, so max is torch.min + ymax = torch.min(corners1[:, :, 0, 1][:, :, None], corners2[:, :, 0, + 1][:, None, :]) + ymin = torch.max(corners1[:, :, 4, 1][:, :, None], corners2[:, :, 4, + 1][:, None, :]) + height = (ymax - ymin).clamp(min=0) + EPS = 1e-8 + + idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device) + idx2 = torch.tensor([0, 2], dtype=torch.int64, device=corners1.device) + rect1 = corners1[:, :, idx, :] + rect2 = corners2[:, :, idx, :] + rect1 = rect1[:, :, :, idx2] + rect2 = rect2[:, :, :, idx2] + + lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, + None, :, :]) + rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, + None, :, :]) + wh = (rb - lt).clamp(min=0) + non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1] + non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2) + if nums_k2 is not None: + for b in range(B): + non_rot_inter_areas[b, :, nums_k2[b]:] = 0 + + enclosing_vols = enclosing_box3d_vol(corners1, corners2) + + # vols of boxes + vols1 = box3d_vol_tensor(corners1).clamp(min=EPS) + vols2 = box3d_vol_tensor(corners2).clamp(min=EPS) + + sum_vols = vols1[:, :, None] + vols2[:, None, :] + + # filter malformed boxes + good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS) + + if rotated_boxes: + inter_areas = torch.zeros((B, K1, K2), dtype=torch.float32) + rect1 = rect1.cpu() + rect2 = rect2.cpu() + nums_k2_np = to_list_1d(nums_k2) + non_rot_inter_areas_np = to_list_3d(non_rot_inter_areas) + for b in range(B): + for k1 in range(K1): + for k2 in range(K2): + if nums_k2 is not None and k2 >= nums_k2_np[b]: + break + if non_rot_inter_areas_np[b][k1][k2] == 0: + continue + ##### compute volume of intersection + inter = polygon_clip_unnest(rect1[b, k1], rect2[b, k2]) + if len(inter) > 0: + xs = torch.stack([x[0] for x in inter]) + ys = torch.stack([x[1] for x in inter]) + inter_areas[b, k1, k2] = torch.abs( + torch.dot(xs, torch.roll(ys, 1)) - + torch.dot(ys, torch.roll(xs, 1))) + inter_areas.mul_(0.5) + else: + inter_areas = non_rot_inter_areas + + inter_areas = inter_areas.to(corners1.device) + ### gIOU = iou - (1 - sum_vols/enclose_vol) + inter_vols = inter_areas * height + if return_inter_vols_only: + return inter_vols + + union_vols = (sum_vols - inter_vols).clamp(min=EPS) + ious = inter_vols / union_vols + giou_second_term = -(1 - union_vols / enclosing_vols) + gious = ious + giou_second_term + gious *= good_boxes + if nums_k2 is not None: + mask = torch.zeros((B, K1, K2), + device=height.device, + dtype=torch.float32) + for b in range(B): + mask[b, :, :nums_k2[b]] = 1 + gious *= mask + return gious + + +generalized_box3d_iou_tensor_jit = torch.jit.script( + generalized_box3d_iou_tensor) + + +def generalized_box3d_iou_cython( + corners1: torch.Tensor, + corners2: torch.Tensor, + nums_k2: torch.Tensor, + rotated_boxes: bool = True, + return_inter_vols_only: bool = False, +): + """ + Input: + corners1: torch Tensor (B, K1, 8, 3), assume up direction is negative Y + corners2: torch Tensor (B, K2, 8, 3), assume up direction is negative Y + Assumes that the box is only rotated along Z direction + Returns: + B x K1 x K2 matrix of generalized IOU by approximating the boxes to be axis aligned + """ + assert len(corners1.shape) == 4 + assert len(corners2.shape) == 4 + assert corners1.shape[2] == 8 + assert corners1.shape[3] == 3 + assert corners1.shape[0] == corners2.shape[0] + assert 
corners1.shape[2] == corners2.shape[2] + assert corners1.shape[3] == corners2.shape[3] + + B, K1 = corners1.shape[0], corners1.shape[1] + _, K2 = corners2.shape[0], corners2.shape[1] + + # # box height. Y is negative, so max is torch.min + ymax = torch.min(corners1[:, :, 0, 1][:, :, None], corners2[:, :, 0, + 1][:, None, :]) + ymin = torch.max(corners1[:, :, 4, 1][:, :, None], corners2[:, :, 4, + 1][:, None, :]) + height = (ymax - ymin).clamp(min=0) + EPS = 1e-8 + + idx = torch.arange(start=3, end=-1, step=-1, device=corners1.device) + idx2 = torch.tensor([0, 2], dtype=torch.int64, device=corners1.device) + rect1 = corners1[:, :, idx, :] + rect2 = corners2[:, :, idx, :] + rect1 = rect1[:, :, :, idx2] + rect2 = rect2[:, :, :, idx2] + + lt = torch.max(rect1[:, :, 1][:, :, None, :], rect2[:, :, 1][:, + None, :, :]) + rb = torch.min(rect1[:, :, 3][:, :, None, :], rect2[:, :, 3][:, + None, :, :]) + wh = (rb - lt).clamp(min=0) + non_rot_inter_areas = wh[:, :, :, 0] * wh[:, :, :, 1] + non_rot_inter_areas = non_rot_inter_areas.view(B, K1, K2) + if nums_k2 is not None: + for b in range(B): + non_rot_inter_areas[b, :, nums_k2[b]:] = 0 + + enclosing_vols = enclosing_box3d_vol(corners1, corners2) + + # vols of boxes + vols1 = box3d_vol_tensor(corners1).clamp(min=EPS) + vols2 = box3d_vol_tensor(corners2).clamp(min=EPS) + + sum_vols = vols1[:, :, None] + vols2[:, None, :] + + # filter malformed boxes + good_boxes = (enclosing_vols > 2 * EPS) * (sum_vols > 4 * EPS) + + if rotated_boxes: + inter_areas = np.zeros((B, K1, K2), dtype=np.float32) + rect1 = rect1.cpu().numpy().astype(np.float32) + rect2 = rect2.cpu().numpy().astype(np.float32) + nums_k2_np = nums_k2.cpu().detach().numpy().astype(np.int32) + non_rot_inter_areas_np = ( + non_rot_inter_areas.cpu().detach().numpy().astype(np.float32)) + box_intersection(rect1, rect2, non_rot_inter_areas_np, nums_k2_np, + inter_areas, True) + inter_areas = torch.from_numpy(inter_areas) + else: + inter_areas = non_rot_inter_areas + + inter_areas = inter_areas.to(corners1.device) + ### gIOU = iou - (1 - sum_vols/enclose_vol) + inter_vols = inter_areas * height + if return_inter_vols_only: + return inter_vols + + union_vols = (sum_vols - inter_vols).clamp(min=EPS) + ious = inter_vols / union_vols + giou_second_term = -(1 - union_vols / enclosing_vols) + gious = ious + giou_second_term + gious *= good_boxes + if nums_k2 is not None: + mask = torch.zeros((B, K1, K2), + device=height.device, + dtype=torch.float32) + for b in range(B): + mask[b, :, :nums_k2[b]] = 1 + gious *= mask + return gious + + +def generalized_box3d_iou( + corners1: torch.Tensor, + corners2: torch.Tensor, + nums_k2: torch.Tensor, + rotated_boxes: bool = True, + return_inter_vols_only: bool = False, + needs_grad: bool = False, +): + if needs_grad is True or box_intersection is None: + context = torch.enable_grad if needs_grad else torch.no_grad + with context(): + return generalized_box3d_iou_tensor_jit(corners1, corners2, + nums_k2, rotated_boxes, + return_inter_vols_only) + + else: + # Cythonized implementation of GIoU + with torch.no_grad(): + return generalized_box3d_iou_cython(corners1, corners2, nums_k2, + rotated_boxes, + return_inter_vols_only) + + +# helper functions to map proposals with ground truth bounding boxes +def get_box3d_min_max_batch_tensor(corner): + ''' Compute min and max coordinates for 3D bounding box + Note: only for axis-aligned bounding boxes + + Input: + corners: PyTorch tensor (N,8,3), assume up direction is Z (batch of N samples) + Output: + box_min_max: an tensor for 
min and max coordinates of the 3D bounding box + + ''' + + min_coord, _ = corner.min(dim=1) + max_coord, _ = corner.max(dim=1) + x_min, x_max = min_coord[:, 0], max_coord[:, 0] + y_min, y_max = min_coord[:, 1], max_coord[:, 1] + z_min, z_max = min_coord[:, 2], max_coord[:, 2] + + return x_min, x_max, y_min, y_max, z_min, z_max + + +def box3d_iou_batch_tensor(corners1, corners2): + ''' Compute 3D bounding box IoU. + Note: only for axis-aligned bounding boxes + + Input: + corners1: PyTorch tensor (N,8,3), assume up direction is Z (batch of N samples) + corners2: PyTorch tensor (N,8,3), assume up direction is Z (batch of N samples) + Output: + iou: a tensor of 3D bounding box IoU (N) + + ''' + + x_min_1, x_max_1, y_min_1, y_max_1, z_min_1, z_max_1 = get_box3d_min_max_batch_tensor( + corners1) + x_min_2, x_max_2, y_min_2, y_max_2, z_min_2, z_max_2 = get_box3d_min_max_batch_tensor( + corners2) + xA = torch.max(x_min_1, x_min_2) + yA = torch.max(y_min_1, y_min_2) + zA = torch.max(z_min_1, z_min_2) + xB = torch.min(x_max_1, x_max_2) + yB = torch.min(y_max_1, y_max_2) + zB = torch.min(z_max_1, z_max_2) + # new_zeros already places the tensor on the same device as corners1; + # forcing .cuda() here would break CPU-only runs + zeros = corners1.new_zeros(xA.shape) + inter_vol = torch.max((xB - xA), zeros) * torch.max( + (yB - yA), zeros) * torch.max((zB - zA), zeros) + box_vol_1 = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) * (z_max_1 - z_min_1) + box_vol_2 = (x_max_2 - x_min_2) * (y_max_2 - y_min_2) * (z_max_2 - z_min_2) + iou = inter_vol / (box_vol_1 + box_vol_2 - inter_vol + 1e-8) + + return iou diff --git a/models/LL3DA/utils/capeval/bleu/__init__.py b/models/LL3DA/utils/capeval/bleu/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/LL3DA/utils/capeval/bleu/bleu.py b/models/LL3DA/utils/capeval/bleu/bleu.py new file mode 100644 index 0000000..390f8d7 --- /dev/null +++ b/models/LL3DA/utils/capeval/bleu/bleu.py @@ -0,0 +1,48 @@ +#!/usr/bin/env python + # + # File Name : bleu.py + # + # Description : Wrapper for BLEU scorer. + # + # Creation Date : 06-01-2015 + # Last Modified : Thu 19 Mar 2015 09:13:28 PM PDT + # Authors : Hao Fang and Tsung-Yi Lin + +from .bleu_scorer import BleuScorer + + +class Bleu: + + def __init__(self, n=4): + # by default, compute BLEU score up to 4-grams + self._n = n + self._hypo_for_image = {} + self.ref_for_image = {} + + def compute_score(self, gts, res): + + assert (gts.keys() == res.keys()) + imgIds = gts.keys() + + bleu_scorer = BleuScorer(n=self._n) + for id in imgIds: + hypo = res[id] + ref = gts[id] + + # Sanity check. + assert (type(hypo) is list) + assert (len(hypo) >= 1) + assert (type(ref) is list) + assert (len(ref) >= 1) + + bleu_scorer += (hypo[0], ref) + + #score, scores = bleu_scorer.compute_score(option='shortest') + score, scores = bleu_scorer.compute_score(option='closest', verbose=0) + #score, scores = bleu_scorer.compute_score(option='average', verbose=1) + + # return (bleu, bleu_info) + return score, scores + + def method(self): + return 'Bleu' diff --git a/models/LL3DA/utils/capeval/bleu/bleu_scorer.py b/models/LL3DA/utils/capeval/bleu/bleu_scorer.py new file mode 100644 index 0000000..f422dfc --- /dev/null +++ b/models/LL3DA/utils/capeval/bleu/bleu_scorer.py @@ -0,0 +1,283 @@ +#!/usr/bin/env python + +# bleu_scorer.py +# David Chiang + +# Copyright (c) 2004-2006 University of Maryland. All rights +# reserved. Do not redistribute without permission from the +# author. Not for commercial use.
+ +# Modified by: +# Hao Fang +# Tsung-Yi Lin +'''Provides: +cook_refs(refs, n=4): Transform a list of reference sentences as strings into a form usable by cook_test(). +cook_test(test, refs, n=4): Transform a test sentence as a string (together with the cooked reference sentences) into a form usable by score_cooked(). +''' + +import copy +import math +import re +import sys +from collections import defaultdict + + +def precook(s, n=4, out=False): + """Takes a string as input and returns an object that can be given to + either cook_refs or cook_test. + + This is optional: cook_refs and cook_test can take string arguments as + well. + """ + words = s.split() + counts = defaultdict(int) + for k in range(1, n + 1): + for i in range(len(words) - k + 1): + ngram = tuple(words[i:i + k]) + counts[ngram] += 1 + return (len(words), counts) + + +def cook_refs(refs, eff=None, n=4): ## lhuang: oracle will call with "average" + """Takes a list of reference sentences for a single segment and returns an + object that encapsulates everything that BLEU needs to know about them.""" + + reflen = [] + maxcounts = {} + for ref in refs: + rl, counts = precook(ref, n) + reflen.append(rl) + for (ngram, count) in counts.items(): + maxcounts[ngram] = max(maxcounts.get(ngram, 0), count) + + # Calculate effective reference sentence length. + if eff == 'shortest': + reflen = min(reflen) + elif eff == 'average': + reflen = float(sum(reflen)) / len(reflen) + + ## lhuang: N.B.: leave reflen computaiton to the very end!! + + ## lhuang: N.B.: in case of "closest", keep a list of reflens!! (bad design) + + return (reflen, maxcounts) + + +def cook_test(test, refs, eff=None, n=4): + """Takes a test sentence and returns an object that encapsulates everything + that BLEU needs to know about it.""" + + reflen, refmaxcounts = refs + testlen, counts = precook(test, n, True) + + result = {} + + # Calculate effective reference sentence length. + + if eff == 'closest': + result['reflen'] = min((abs(l - testlen), l) for l in reflen)[1] + else: ## i.e., "average" or "shortest" or None + result['reflen'] = reflen + + result['testlen'] = testlen + + result['guess'] = [max(0, testlen - k + 1) for k in range(1, n + 1)] + + result['correct'] = [0] * n + for (ngram, count) in counts.items(): + result['correct'][len(ngram) - 1] += min(refmaxcounts.get(ngram, 0), + count) + + return result + + +class BleuScorer(object): + """Bleu scorer.""" + + __slots__ = 'n', 'crefs', 'ctest', '_score', '_ratio', '_testlen', '_reflen', 'special_reflen' + + # special_reflen is used in oracle (proportional effective ref len for a node). 
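+ + # Illustrative usage sketch (added commentary, not from the original + # coco-caption source; the example strings are made up): + # scorer = BleuScorer(n=4) + # scorer += ('the cat sat on the mat', ['a cat sat on a mat']) + # bleus, per_sentence = scorer.compute_score(option='closest') + # compute_score returns one corpus-level score per n-gram order together + # with the per-sentence scores, which is how Bleu.compute_score above + # consumes it.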
+ + def copy(self): + """copy the refs.""" + new = BleuScorer(n=self.n) + new.ctest = copy.copy(self.ctest) + new.crefs = copy.copy(self.crefs) + new._score = None + return new + + def __init__(self, test=None, refs=None, n=4, special_reflen=None): + """singular instance.""" + + self.n = n + self.crefs = [] + self.ctest = [] + self.cook_append(test, refs) + self.special_reflen = special_reflen + + def cook_append(self, test, refs): + """called by constructor and __iadd__ to avoid creating new + instances.""" + + if refs is not None: + self.crefs.append(cook_refs(refs)) + if test is not None: + cooked_test = cook_test(test, self.crefs[-1]) + self.ctest.append(cooked_test) ## N.B.: -1 + else: + self.ctest.append( + None) # lens of crefs and ctest have to match + + self._score = None ## need to recompute + + def ratio(self, option=None): + self.compute_score(option=option) + return self._ratio + + def score_ratio(self, option=None): + """return (bleu, len_ratio) pair.""" + return (self.fscore(option=option), self.ratio(option=option)) + + def score_ratio_str(self, option=None): + return '%.4f (%.2f)' % self.score_ratio(option) + + def reflen(self, option=None): + self.compute_score(option=option) + return self._reflen + + def testlen(self, option=None): + self.compute_score(option=option) + return self._testlen + + def retest(self, new_test): + if type(new_test) is str: + new_test = [new_test] + assert len(new_test) == len(self.crefs), new_test + self.ctest = [] + for t, rs in zip(new_test, self.crefs): + self.ctest.append(cook_test(t, rs)) + self._score = None + + return self + + def rescore(self, new_test): + """replace test(s) with new test(s), and returns the new score.""" + + return self.retest(new_test).compute_score() + + def size(self): + assert len(self.crefs) == len( + self.ctest), 'refs/test mismatch! %d<>%d' % (len( + self.crefs), len(self.ctest)) + return len(self.crefs) + + def __iadd__(self, other): + """add an instance (e.g., from another sentence).""" + + if type(other) is tuple: + ## avoid creating new BleuScorer instances + self.cook_append(other[0], other[1]) + else: + assert self.compatible(other), 'incompatible BLEUs.' 
+ self.ctest.extend(other.ctest) + self.crefs.extend(other.crefs) + self._score = None ## need to recompute + + return self + + def compatible(self, other): + return isinstance(other, BleuScorer) and self.n == other.n + + def single_reflen(self, option='average'): + return self._single_reflen(self.crefs[0][0], option) + + def _single_reflen(self, reflens, option=None, testlen=None): + + if option == 'shortest': + reflen = min(reflens) + elif option == 'average': + reflen = float(sum(reflens)) / len(reflens) + elif option == 'closest': + reflen = min((abs(l - testlen), l) for l in reflens)[1] + else: + assert False, 'unsupported reflen option %s' % option + + return reflen + + def recompute_score(self, option=None, verbose=0): + self._score = None + return self.compute_score(option, verbose) + + def compute_score(self, option=None, verbose=0): + n = self.n + small = 1e-9 + tiny = 1e-15 ## so that if guess is 0 still return 0 + bleu_list = [[] for _ in range(n)] + + if self._score is not None: + return self._score + + if option is None: + option = 'average' if len(self.crefs) == 1 else 'closest' + + self._testlen = 0 + self._reflen = 0 + totalcomps = { + 'testlen': 0, + 'reflen': 0, + 'guess': [0] * n, + 'correct': [0] * n + } + + # for each sentence + for comps in self.ctest: + testlen = comps['testlen'] + self._testlen += testlen + + if self.special_reflen is None: ## need computation + reflen = self._single_reflen(comps['reflen'], option, testlen) + else: + reflen = self.special_reflen + + self._reflen += reflen + + for key in ['guess', 'correct']: + for k in range(n): + totalcomps[key][k] += comps[key][k] + + # append per image bleu score + bleu = 1. + for k in range(n): + bleu *= (float(comps['correct'][k]) + tiny) \ + /(float(comps['guess'][k]) + small) + bleu_list[k].append(bleu**(1. / (k + 1))) + ratio = (testlen + tiny) / (reflen + small + ) ## N.B.: avoid zero division + if ratio < 1: + for k in range(n): + bleu_list[k][-1] *= math.exp(1 - 1 / ratio) + + if verbose > 1: + print(comps, reflen) + + totalcomps['reflen'] = self._reflen + totalcomps['testlen'] = self._testlen + + bleus = [] + bleu = 1. + for k in range(n): + bleu *= float(totalcomps['correct'][k] + tiny) \ + / (totalcomps['guess'][k] + small) + bleus.append(bleu**(1. 
/ (k + 1))) + ratio = (self._testlen + tiny) / (self._reflen + small + ) ## N.B.: avoid zero division + if ratio < 1: + for k in range(n): + bleus[k] *= math.exp(1 - 1 / ratio) + + if verbose > 0: + print(totalcomps) + print('ratio:', ratio) + + self._score = bleus + return self._score, bleu_list diff --git a/models/LL3DA/utils/capeval/cider/__init__.py b/models/LL3DA/utils/capeval/cider/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/LL3DA/utils/capeval/cider/cider.py b/models/LL3DA/utils/capeval/cider/cider.py new file mode 100644 index 0000000..86d69f7 --- /dev/null +++ b/models/LL3DA/utils/capeval/cider/cider.py @@ -0,0 +1,54 @@ +# Filename: cider.py +# +# Description: Describes the class to compute the CIDEr (Consensus-Based Image Description Evaluation) Metric +# by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726) +# +# Creation Date: Sun Feb 8 14:16:54 2015 +# +# Authors: Ramakrishna Vedantam and Tsung-Yi Lin + +import pdb + +from .cider_scorer import CiderScorer + + +class Cider: + """Main Class to compute the CIDEr metric.""" + + def __init__(self, test=None, refs=None, n=4, sigma=6.0): + # set cider to sum over 1 to 4-grams + self._n = n + # set the standard deviation parameter for gaussian penalty + self._sigma = sigma + + def compute_score(self, gts, res): + """Main function to compute CIDEr score. + + :param hypo_for_image (dict) : dictionary with key and value + ref_for_image (dict) : dictionary with key and value + :return: cider (float) : computed CIDEr score for the corpus + """ + + assert (gts.keys() == res.keys()) + imgIds = gts.keys() + + cider_scorer = CiderScorer(n=self._n, sigma=self._sigma) + + for id in imgIds: + hypo = res[id] + ref = gts[id] + + # Sanity check. + assert (type(hypo) is list) + assert (len(hypo) >= 1) + assert (type(ref) is list) + assert (len(ref) > 0) + + cider_scorer += (hypo[0], ref) + + (score, scores) = cider_scorer.compute_score() + + return score, scores + + def method(self): + return 'CIDEr' diff --git a/models/LL3DA/utils/capeval/cider/cider_scorer.py b/models/LL3DA/utils/capeval/cider/cider_scorer.py new file mode 100644 index 0000000..b18372f --- /dev/null +++ b/models/LL3DA/utils/capeval/cider/cider_scorer.py @@ -0,0 +1,206 @@ +#!/usr/bin/env python +# Tsung-Yi Lin +# Ramakrishna Vedantam + +import copy +import math +import pdb +from collections import defaultdict + +import numpy as np + + +def precook(s, n=4, out=False): + """Takes a string as input and returns an object that can be given to + either cook_refs or cook_test. This is optional: cook_refs and cook_test + can take string arguments as well. + + :param s: string : sentence to be converted into ngrams + :param n: int : number of ngrams for which representation is calculated + :return: term frequency vector for occuring ngrams + """ + words = s.split() + counts = defaultdict(int) + for k in range(1, n + 1): + for i in range(len(words) - k + 1): + ngram = tuple(words[i:i + k]) + counts[ngram] += 1 + return counts + + +def cook_refs(refs, n=4): ## lhuang: oracle will call with "average" + """Takes a list of reference sentences for a single segment and returns an + object that encapsulates everything that BLEU needs to know about them. 
+ + :param refs: list of string : reference sentences for some image + :param n: int : number of ngrams for which (ngram) representation is calculated + :return: result (list of dict) + """ + return [precook(ref, n) for ref in refs] + + +def cook_test(test, n=4): + """Takes a test sentence and returns an object that encapsulates everything + that BLEU needs to know about it. + + :param test: list of string : hypothesis sentence for some image + :param n: int : number of ngrams for which (ngram) representation is calculated + :return: result (dict) + """ + return precook(test, n, True) + + +class CiderScorer(object): + """CIDEr scorer.""" + + def copy(self): + """copy the refs.""" + new = CiderScorer(n=self.n) + new.ctest = copy.copy(self.ctest) + new.crefs = copy.copy(self.crefs) + return new + + def __init__(self, test=None, refs=None, n=4, sigma=6.0): + """singular instance.""" + self.n = n + self.sigma = sigma + self.crefs = [] + self.ctest = [] + self.document_frequency = defaultdict(float) + self.cook_append(test, refs) + self.ref_len = None + + def cook_append(self, test, refs): + """called by constructor and __iadd__ to avoid creating new + instances.""" + + if refs is not None: + self.crefs.append(cook_refs(refs)) + if test is not None: + self.ctest.append(cook_test(test)) ## N.B.: -1 + else: + self.ctest.append( + None) # lens of crefs and ctest have to match + + def size(self): + assert len(self.crefs) == len( + self.ctest), 'refs/test mismatch! %d<>%d' % (len( + self.crefs), len(self.ctest)) + return len(self.crefs) + + def __iadd__(self, other): + """add an instance (e.g., from another sentence).""" + + if type(other) is tuple: + ## avoid creating new CiderScorer instances + self.cook_append(other[0], other[1]) + else: + self.ctest.extend(other.ctest) + self.crefs.extend(other.crefs) + + return self + + def compute_doc_freq(self): + """Compute term frequency for reference data. + + This will be used to compute idf (inverse document frequency later) + The term frequency is stored in the object + :return: None + """ + for refs in self.crefs: + # refs, k ref captions of one image + for ngram in set( + [ngram for ref in refs for (ngram, count) in ref.items()]): + self.document_frequency[ngram] += 1 + # maxcounts[ngram] = max(maxcounts.get(ngram,0), count) + + def compute_cider(self): + + def counts2vec(cnts): + """Function maps counts of ngram to vector of tfidf weights. The + function returns vec, an array of dictionary that store mapping of + n-gram and tf-idf weights. The n-th entry of array denotes length + of n-grams. + + :param cnts: + :return: vec (array of dict), norm (array of float), length (int) + """ + vec = [defaultdict(float) for _ in range(self.n)] + length = 0 + norm = [0.0 for _ in range(self.n)] + for (ngram, term_freq) in cnts.items(): + # give word count 1 if it doesn't appear in reference corpus + df = np.log(max(1.0, self.document_frequency[ngram])) + # ngram index + n = len(ngram) - 1 + # tf (term_freq) * idf (precomputed idf) for n-grams + vec[n][ngram] = float(term_freq) * (self.ref_len - df) + # compute norm for the vector. the norm will be used for computing similarity + norm[n] += pow(vec[n][ngram], 2) + + if n == 1: + length += term_freq + norm = [np.sqrt(n) for n in norm] + return vec, norm, length + + def sim(vec_hyp, vec_ref, norm_hyp, norm_ref, length_hyp, length_ref): + """Compute the cosine similarity of two vectors. 
+ + :param vec_hyp: array of dictionary for vector corresponding to hypothesis + :param vec_ref: array of dictionary for vector corresponding to reference + :param norm_hyp: array of float for vector corresponding to hypothesis + :param norm_ref: array of float for vector corresponding to reference + :param length_hyp: int containing length of hypothesis + :param length_ref: int containing length of reference + :return: array of score for each n-grams cosine similarity + """ + delta = float(length_hyp - length_ref) + # measure consine similarity + val = np.array([0.0 for _ in range(self.n)]) + for n in range(self.n): + # ngram + for (ngram, count) in vec_hyp[n].items(): + # vrama91 : added clipping + val[n] += min(vec_hyp[n][ngram], + vec_ref[n][ngram]) * vec_ref[n][ngram] + + if (norm_hyp[n] != 0) and (norm_ref[n] != 0): + val[n] /= (norm_hyp[n] * norm_ref[n]) + + assert (not math.isnan(val[n])) + # vrama91: added a length based gaussian penalty + val[n] *= np.e**(-(delta**2) / (2 * self.sigma**2)) + return val + + # compute log reference length + self.ref_len = np.log(float(len(self.crefs))) + + scores = [] + for test, refs in zip(self.ctest, self.crefs): + # compute vector for test captions + vec, norm, length = counts2vec(test) + # compute vector for ref captions + score = np.array([0.0 for _ in range(self.n)]) + for ref in refs: + vec_ref, norm_ref, length_ref = counts2vec(ref) + score += sim(vec, vec_ref, norm, norm_ref, length, length_ref) + # change by vrama91 - mean of ngram scores, instead of sum + score_avg = np.mean(score) + # divide by number of references + score_avg /= len(refs) + # multiply score by 10 + score_avg *= 10.0 + # append score of an image to the score list + scores.append(score_avg) + return scores + + def compute_score(self, option=None, verbose=0): + # compute idf + self.compute_doc_freq() + # assert to check document frequency + assert (len(self.ctest) >= max(self.document_frequency.values())) + # compute cider score + score = self.compute_cider() + # debug + # print score + return np.mean(np.array(score)), np.array(score) diff --git a/models/LL3DA/utils/capeval/meteor/__init__.py b/models/LL3DA/utils/capeval/meteor/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/LL3DA/utils/capeval/meteor/data/paraphrase-en.gz b/models/LL3DA/utils/capeval/meteor/data/paraphrase-en.gz new file mode 100644 index 0000000..d4d224a --- /dev/null +++ b/models/LL3DA/utils/capeval/meteor/data/paraphrase-en.gz @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c147ac7d2c91f2fbb3ad31e4b352235061eb83145e0434daf217ee9ca5975f48 +size 61813011 diff --git a/models/LL3DA/utils/capeval/meteor/meteor-1.5.jar b/models/LL3DA/utils/capeval/meteor/meteor-1.5.jar new file mode 100644 index 0000000..a833bc0 Binary files /dev/null and b/models/LL3DA/utils/capeval/meteor/meteor-1.5.jar differ diff --git a/models/LL3DA/utils/capeval/meteor/meteor.py b/models/LL3DA/utils/capeval/meteor/meteor.py new file mode 100644 index 0000000..7c4f460 --- /dev/null +++ b/models/LL3DA/utils/capeval/meteor/meteor.py @@ -0,0 +1,86 @@ +#!/usr/bin/env python + +# Python wrapper for METEOR implementation, by Xinlei Chen +# Acknowledge Michael Denkowski for the generous discussion and help + +import os +import subprocess +import sys +import threading + +# Assumes meteor-1.5.jar is in the same directory as meteor.py. Change as needed. 
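+ +# A minimal usage sketch (added commentary, not part of the original +# wrapper; assumes a Java runtime is available on PATH): +# m = Meteor() +# score, scores = m.compute_score({'0': ['the cat sat on the mat']}, +# {'0': ['a cat sits on a mat']}) +# The first dict holds reference captions (gts) and the second the generated +# hypotheses (res), keyed by the same ids.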
+METEOR_JAR = 'meteor-1.5.jar' +# print METEOR_JAR + + +class Meteor: + + def __init__(self): + self.meteor_cmd = ['java', '-jar', '-Xmx2G', METEOR_JAR, \ + '-', '-', '-stdio', '-l', 'en', '-norm'] + self.meteor_p = subprocess.Popen(self.meteor_cmd, \ + cwd=os.path.dirname(os.path.abspath(__file__)), \ + stdin=subprocess.PIPE, \ + stdout=subprocess.PIPE, \ + stderr=subprocess.PIPE) + # Used to guarantee thread safety + self.lock = threading.Lock() + + def compute_score(self, gts, res): + assert (gts.keys() == res.keys()) + imgIds = gts.keys() + scores = [] + + eval_line = 'EVAL' + self.lock.acquire() + for i in imgIds: + assert (len(res[i]) >= 1) + stat = self._stat(res[i][0], gts[i]) + eval_line += ' ||| {}'.format(stat) + + self.meteor_p.stdin.write('{}\n'.format(eval_line).encode()) + self.meteor_p.stdin.flush() + for i in range(0, len(imgIds)): + scores.append(float(self.meteor_p.stdout.readline().strip())) + score = float(self.meteor_p.stdout.readline().strip()) + self.lock.release() + + return score, scores + + def method(self): + return 'METEOR' + + def _stat(self, hypothesis_str, reference_list): + # SCORE ||| reference 1 words ||| reference n words ||| hypothesis words + hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ') + score_line = ' ||| '.join( + ('SCORE', ' ||| '.join(reference_list), hypothesis_str)) + score_line = score_line.replace('\n', '').replace('\r', '') + self.meteor_p.stdin.write('{}\n'.format(score_line).encode()) + self.meteor_p.stdin.flush() + return self.meteor_p.stdout.readline().decode().strip() + + def _score(self, hypothesis_str, reference_list): + self.lock.acquire() + # SCORE ||| reference 1 words ||| reference n words ||| hypothesis words + hypothesis_str = hypothesis_str.replace('|||', '').replace('  ', ' ') + score_line = ' ||| '.join( + ('SCORE', ' ||| '.join(reference_list), hypothesis_str)) + # encode and flush as in _stat; writing a raw str to the byte pipe + # would fail and the jar would never see the request + self.meteor_p.stdin.write('{}\n'.format(score_line).encode()) + self.meteor_p.stdin.flush() + stats = self.meteor_p.stdout.readline().decode().strip() + eval_line = 'EVAL ||| {}'.format(stats) + # EVAL ||| stats + self.meteor_p.stdin.write('{}\n'.format(eval_line).encode()) + self.meteor_p.stdin.flush() + score = float(self.meteor_p.stdout.readline().strip()) + # bug fix: there are two values returned by the jar file, one average, and one all, so do it twice + # thanks to Andrej for pointing this out + score = float(self.meteor_p.stdout.readline().strip()) + self.lock.release() + return score + + def __del__(self): + self.lock.acquire() + self.meteor_p.stdin.close() + self.meteor_p.kill() + self.meteor_p.wait() + self.lock.release() diff --git a/models/LL3DA/utils/capeval/rouge/__init__.py b/models/LL3DA/utils/capeval/rouge/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/models/LL3DA/utils/capeval/rouge/rouge.py b/models/LL3DA/utils/capeval/rouge/rouge.py new file mode 100644 index 0000000..7626da4 --- /dev/null +++ b/models/LL3DA/utils/capeval/rouge/rouge.py @@ -0,0 +1,112 @@ +#!/usr/bin/env python + # + # File Name : rouge.py + # + # Description : Computes ROUGE-L metric as described by Lin and Hovy (2004) + # + # Creation Date : 2015-01-07 06:03 + # Author : Ramakrishna Vedantam + +import pdb + +import numpy as np + + +def my_lcs(string, sub): + """Calculates longest common subsequence for a pair of tokenized strings.
+ + :param string : list of str : tokens from a string split using whitespace + :param sub : list of str : shorter string, also split using whitespace + :returns: length (list of int): length of the longest common subsequence between the two strings + + Note: my_lcs only gives length of the longest common subsequence, not the actual LCS + """ + if (len(string) < len(sub)): + sub, string = string, sub + + lengths = [[0 for i in range(0, + len(sub) + 1)] + for j in range(0, + len(string) + 1)] + + for j in range(1, len(sub) + 1): + for i in range(1, len(string) + 1): + if (string[i - 1] == sub[j - 1]): + lengths[i][j] = lengths[i - 1][j - 1] + 1 + else: + lengths[i][j] = max(lengths[i - 1][j], lengths[i][j - 1]) + + return lengths[len(string)][len(sub)] + + +class Rouge(): + """Class for computing ROUGE-L score for a set of candidate sentences for + the MS COCO test set.""" + + def __init__(self): + # vrama91: updated the value below based on discussion with Hovey + self.beta = 1.2 + + def calc_score(self, candidate, refs): + """Compute ROUGE-L score given one candidate and references for an + image. + + :param candidate: str : candidate sentence to be evaluated + :param refs: list of str : COCO reference sentences for the particular image to be evaluated + :returns score: int (ROUGE-L score for the candidate evaluated against references) + """ + # assert(len(candidate)==0) + # assert(len(refs)>0) + prec = [] + rec = [] + + # split into tokens + token_c = candidate[0].split(' ') + + for reference in refs: + # split into tokens + token_r = reference.split(' ') + # compute the longest common subsequence + lcs = my_lcs(token_r, token_c) + prec.append(lcs / float(len(token_c))) + rec.append(lcs / float(len(token_r))) + + prec_max = max(prec) + rec_max = max(rec) + + if (prec_max != 0 and rec_max != 0): + score = ((1 + self.beta**2) * prec_max * + rec_max) / float(rec_max + self.beta**2 * prec_max) + else: + score = 0.0 + return score + + def compute_score(self, gts, res): + """Computes Rouge-L score given a set of reference and candidate + sentences for the dataset Invoked by evaluate_captions.py. + + :param hypo_for_image: dict : candidate / test sentences with "image name" key and "tokenized sentences" as values + :param ref_for_image: dict : reference MS-COCO sentences with "image name" key and "tokenized sentences" as values + :returns: average_score: float (mean ROUGE-L score computed by averaging scores for all the images) + """ + assert (gts.keys() == res.keys()) + imgIds = gts.keys() + + score = [] + for id in imgIds: + hypo = res[id] + ref = gts[id] + + score.append(self.calc_score(hypo, ref)) + + # Sanity check. + assert (type(hypo) is list) + assert (len(hypo) >= 1) + assert (type(ref) is list) + assert (len(ref) > 0) + + average_score = np.mean(np.array(score)) + return average_score, np.array(score) + + def method(self): + return 'Rouge' diff --git a/models/LL3DA/utils/cython_compile.py b/models/LL3DA/utils/cython_compile.py new file mode 100644 index 0000000..396f838 --- /dev/null +++ b/models/LL3DA/utils/cython_compile.py @@ -0,0 +1,15 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
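+ +# Builds the optional Cython extension that box_util.py tries to import for +# fast rotated-box intersection; see cython_compile.sh for the usual call: +# python cython_compile.py build_ext --inplace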
+ +import numpy as np +from Cython.Build import cythonize +from setuptools import Extension, setup + +# hacky way to find numpy include path +# replace with actual path if this does not work +np_include_path = np.__file__.replace('__init__.py', 'core/include/') +INCLUDE_PATH = [np_include_path] + +setup(ext_modules=cythonize( + Extension('box_intersection', + sources=['box_intersection.pyx'], + include_dirs=INCLUDE_PATH)), ) diff --git a/models/LL3DA/utils/cython_compile.sh b/models/LL3DA/utils/cython_compile.sh new file mode 100644 index 0000000..6c2f2d7 --- /dev/null +++ b/models/LL3DA/utils/cython_compile.sh @@ -0,0 +1,3 @@ +#!/bin/bash +# Copyright (c) Facebook, Inc. and its affiliates. +python cython_compile.py build_ext --inplace diff --git a/models/LL3DA/utils/dist.py b/models/LL3DA/utils/dist.py new file mode 100644 index 0000000..3a47c76 --- /dev/null +++ b/models/LL3DA/utils/dist.py @@ -0,0 +1,176 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import pickle + +import torch +import torch.distributed as dist + + +def is_distributed(): + if not dist.is_available() or not dist.is_initialized(): + return False + return True + + +def get_rank(): + if not is_distributed(): + return 0 + return dist.get_rank() + + +def is_primary(): + return get_rank() == 0 + + +def get_world_size(): + if not is_distributed(): + return 1 + return dist.get_world_size() + + +def barrier(): + if not is_distributed(): + return + torch.distributed.barrier() + + +def setup_print_for_distributed(is_primary): + """This function disables printing when not in primary process.""" + import builtins as __builtin__ + builtin_print = __builtin__.print + + def print(*args, **kwargs): + force = kwargs.pop('force', False) + if is_primary or force: + builtin_print(*args, **kwargs) + + __builtin__.print = print + + +def init_distributed(gpu_id, global_rank, world_size, dist_url, dist_backend): + torch.cuda.set_device(gpu_id) + print( + f'| distributed init (rank {global_rank}) (world {world_size}): {dist_url}', + flush=True, + ) + torch.distributed.init_process_group( + backend=dist_backend, + init_method=dist_url, + world_size=world_size, + rank=global_rank, + ) + torch.distributed.barrier() + setup_print_for_distributed(is_primary()) + + +def all_reduce_sum(tensor): + if not is_distributed(): + return tensor + dim_squeeze = False + if tensor.ndim == 0: + tensor = tensor[None, ...] + dim_squeeze = True + torch.distributed.all_reduce(tensor) + if dim_squeeze: + tensor = tensor.squeeze(0) + return tensor + + +def all_reduce_average(tensor): + val = all_reduce_sum(tensor) + return val / get_world_size() + + +# Function from DETR - https://github.com/facebookresearch/detr/blob/master/util/misc.py +def reduce_dict(input_dict, average=True): + """ + Args: + input_dict (dict): all the values will be reduced + average (bool): whether to do average or sum + Reduce the values in the dictionary from all processes so that all processes + have the averaged results. Returns a dict with the same fields as + input_dict, after reduction. 
+ """ + world_size = get_world_size() + if world_size < 2: + return input_dict + with torch.no_grad(): + names = [] + values = [] + # sort the keys so that they are consistent across processes + for k in sorted(input_dict.keys()): + names.append(k) + values.append(input_dict[k]) + values = torch.stack(values, dim=0) + torch.distributed.all_reduce(values) + if average: + values /= world_size + reduced_dict = {k: v for k, v in zip(names, values)} + return reduced_dict + + +# Function from https://github.com/facebookresearch/detr/blob/master/util/misc.py +def all_gather_pickle(data, device): + """ + Run all_gather on arbitrary picklable data (not necessarily tensors) + Args: + data: any picklable object + Returns: + list[data]: list of data gathered from each rank + """ + world_size = get_world_size() + if world_size == 1: + return [data] + + # serialized to a Tensor + buffer = pickle.dumps(data) + storage = torch.ByteStorage.from_buffer(buffer) + tensor = torch.ByteTensor(storage).to(device) + + # obtain Tensor size of each rank + local_size = torch.tensor([tensor.numel()], device=device) + size_list = [torch.tensor([0], device=device) for _ in range(world_size)] + dist.all_gather(size_list, local_size) + size_list = [int(size.item()) for size in size_list] + max_size = max(size_list) + + # receiving Tensor from all ranks + # we pad the tensor because torch all_gather does not support + # gathering tensors of different shapes + tensor_list = [] + for _ in size_list: + tensor_list.append( + torch.empty((max_size, ), dtype=torch.uint8, device=device)) + if local_size != max_size: + padding = torch.empty(size=(max_size - local_size, ), + dtype=torch.uint8, + device=device) + tensor = torch.cat((tensor, padding), dim=0) + dist.all_gather(tensor_list, tensor) + + data_list = [] + for size, tensor in zip(size_list, tensor_list): + buffer = tensor.cpu().numpy().tobytes()[:size] + data_list.append(pickle.loads(buffer)) + + return data_list + + +def all_gather_dict(data): + """Run all_gather on data which is a dictionary of Tensors.""" + assert isinstance(data, dict) + + gathered_dict = {} + for item_key in data: + if isinstance(data[item_key], torch.Tensor): + if is_distributed(): + data[item_key] = data[item_key].contiguous() + tensor_list = [ + torch.empty_like(data[item_key]) + for _ in range(get_world_size()) + ] + dist.all_gather(tensor_list, data[item_key]) + gathered_tensor = torch.cat(tensor_list, dim=0) + else: + gathered_tensor = data[item_key] + gathered_dict[item_key] = gathered_tensor + return gathered_dict diff --git a/models/LL3DA/utils/download_weights.py b/models/LL3DA/utils/download_weights.py new file mode 100644 index 0000000..3c98fdd --- /dev/null +++ b/models/LL3DA/utils/download_weights.py @@ -0,0 +1,37 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
+ +import os +import pickle +from urllib import request + +import torch + +## Define the weights you want and where to store them +dataset = 'scannet' +encoder = '_masked' # or "" +epoch = 1080 +base_url = 'https://dl.fbaipublicfiles.com/3detr/checkpoints' +local_dir = '/tmp/' + +### Downloading the weights +weights_file = f'{dataset}{encoder}_ep{epoch}.pth' +metrics_file = f'{dataset}{encoder}_ep{epoch}_metrics.pkl' +local_weights = os.path.join(local_dir, weights_file) +local_metrics = os.path.join(local_dir, metrics_file) + +url = os.path.join(base_url, weights_file) +request.urlretrieve(url, local_weights) +print(f'Downloaded weights from {url} to {local_weights}') + +url = os.path.join(base_url, metrics_file) +request.urlretrieve(url, local_metrics) +print(f'Downloaded metrics from {url} to {local_metrics}') + +# weights can be simply loaded with pytorch +weights = torch.load(local_weights, map_location=torch.device('cpu')) +print('Weights loaded successfully.') + +# metrics can be loaded with pickle +with open(local_metrics, 'rb') as fh: + metrics = pickle.load(fh) +print('Metrics loaded successfully.') diff --git a/models/LL3DA/utils/eval_det.py b/models/LL3DA/utils/eval_det.py new file mode 100644 index 0000000..8e30f32 --- /dev/null +++ b/models/LL3DA/utils/eval_det.py @@ -0,0 +1,280 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +"""Generic Code for Object Detection Evaluation. + +Input: +For each class: + For each image: + Predictions: box, score + Groundtruths: box + +Output: +For each class: + precision-recal and average precision + +Author: Charles R. Qi + +Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py +""" +import numpy as np +from utils.box_util import box3d_iou + + +def voc_ap(rec, prec, use_07_metric=False): + """ap = voc_ap(rec, prec, [use_07_metric]) + Compute VOC AP given precision and recall. + If use_07_metric is true, uses the + VOC 07 11 point method (default:False). + """ + if use_07_metric: + # 11 point metric + ap = 0.0 + for t in np.arange(0.0, 1.1, 0.1): + if np.sum(rec >= t) == 0: + p = 0 + else: + p = np.max(prec[rec >= t]) + ap = ap + p / 11.0 + else: + # correct AP calculation + # first append sentinel values at the end + mrec = np.concatenate(([0.0], rec, [1.0])) + mpre = np.concatenate(([0.0], prec, [0.0])) + + # compute the precision envelope + for i in range(mpre.size - 1, 0, -1): + mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) + + # to calculate area under PR curve, look for points + # where X axis (recall) changes value + i = np.where(mrec[1:] != mrec[:-1])[0] + + # and sum (\Delta recall) * prec + ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) + return ap + + +def get_iou_obb(bb1, bb2): + iou3d, iou2d = box3d_iou(bb1, bb2) + return iou3d + + +def get_iou_main(get_iou_func, args): + return get_iou_func(*args) + + +def eval_det_cls(pred, + gt, + ovthresh=0.25, + use_07_metric=False, + get_iou_func=get_iou_obb): + """Generic functions to compute precision/recall for object detection for a + single class. 
+ + Input: + pred: map of {img_id: [(bbox, score)]} where bbox is numpy array + gt: map of {img_id: [bbox]} + ovthresh: scalar, iou threshold + use_07_metric: bool, if True use VOC07 11 point method + Output: + rec: numpy array of length nd + prec: numpy array of length nd + ap: scalar, average precision + """ + + # construct gt objects + class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}} + npos = 0 + for img_id in gt.keys(): + bbox = np.array(gt[img_id]) + det = [False] * len(bbox) + npos += len(bbox) + class_recs[img_id] = {'bbox': bbox, 'det': det} + # pad empty list to all other imgids + for img_id in pred.keys(): + if img_id not in gt: + class_recs[img_id] = {'bbox': np.array([]), 'det': []} + + # construct dets + image_ids = [] + confidence = [] + BB = [] + for img_id in pred.keys(): + for box, score in pred[img_id]: + image_ids.append(img_id) + confidence.append(score) + BB.append(box) + confidence = np.array(confidence) + BB = np.array(BB) # (nd,4 or 8,3 or 6) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + sorted_scores = np.sort(-confidence) + BB = BB[sorted_ind, ...] + image_ids = [image_ids[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp = np.zeros(nd) + fp = np.zeros(nd) + for d in range(nd): + # if d%100==0: print(d) + R = class_recs[image_ids[d]] + bb = BB[d, ...].astype(float) + ovmax = -np.inf + BBGT = R['bbox'].astype(float) + + if BBGT.size > 0: + # compute overlaps + for j in range(BBGT.shape[0]): + iou = get_iou_main(get_iou_func, (bb, BBGT[j, ...])) + if iou > ovmax: + ovmax = iou + jmax = j + + # print d, ovmax + if ovmax > ovthresh: + if not R['det'][jmax]: + tp[d] = 1.0 + R['det'][jmax] = 1 + else: + fp[d] = 1.0 + else: + fp[d] = 1.0 + + # compute precision recall + fp = np.cumsum(fp) + tp = np.cumsum(tp) + if npos == 0: + rec = np.zeros_like(tp) + else: + rec = tp / float(npos) + # print('NPOS: ', npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = voc_ap(rec, prec, use_07_metric) + + return rec, prec, ap + + +def eval_det_cls_wrapper(arguments): + pred, gt, ovthresh, use_07_metric, get_iou_func = arguments + rec, prec, ap = eval_det_cls(pred, gt, ovthresh, use_07_metric, + get_iou_func) + return (rec, prec, ap) + + +def eval_det(pred_all, + gt_all, + ovthresh=0.25, + use_07_metric=False, + get_iou_func=get_iou_obb): + """Generic functions to compute precision/recall for object detection for + multiple classes.
+
+    Input:
+        pred_all: map of {img_id: [(classname, bbox, score)]}
+        gt_all: map of {img_id: [(classname, bbox)]}
+        ovthresh: scalar, iou threshold
+        use_07_metric: bool, if true use VOC07 11 point method
+    Output:
+        rec: {classname: rec}
+        prec: {classname: prec_all}
+        ap: {classname: scalar}
+    """
+    pred = {}  # map {classname: pred}
+    gt = {}  # map {classname: gt}
+    for img_id in pred_all.keys():
+        for classname, bbox, score in pred_all[img_id]:
+            if classname not in pred:
+                pred[classname] = {}
+            if img_id not in pred[classname]:
+                pred[classname][img_id] = []
+            if classname not in gt:
+                gt[classname] = {}
+            if img_id not in gt[classname]:
+                gt[classname][img_id] = []
+            pred[classname][img_id].append((bbox, score))
+    for img_id in gt_all.keys():
+        for classname, bbox in gt_all[img_id]:
+            if classname not in gt:
+                gt[classname] = {}
+            if img_id not in gt[classname]:
+                gt[classname][img_id] = []
+            gt[classname][img_id].append(bbox)
+
+    rec = {}
+    prec = {}
+    ap = {}
+    for classname in gt.keys():
+        # print('Computing AP for class: ', classname)
+        rec[classname], prec[classname], ap[classname] = eval_det_cls(
+            pred[classname], gt[classname], ovthresh, use_07_metric,
+            get_iou_func)
+        # print(classname, ap[classname])
+
+    return rec, prec, ap
+
+
+def eval_det_multiprocessing(pred_all,
+                             gt_all,
+                             ovthresh=0.25,
+                             use_07_metric=False,
+                             get_iou_func=get_iou_obb):
+    """Generic function to compute precision/recall for object detection for
+    multiple classes.
+
+    Input:
+        pred_all: map of {img_id: [(classname, bbox, score)]}
+        gt_all: map of {img_id: [(classname, bbox)]}
+        ovthresh: scalar, iou threshold
+        use_07_metric: bool, if true use VOC07 11 point method
+    Output:
+        rec: {classname: rec}
+        prec: {classname: prec_all}
+        ap: {classname: scalar}
+    """
+    pred = {}  # map {classname: pred}
+    gt = {}  # map {classname: gt}
+    for img_id in pred_all.keys():
+        for classname, bbox, score in pred_all[img_id]:
+            if classname not in pred:
+                pred[classname] = {}
+            if img_id not in pred[classname]:
+                pred[classname][img_id] = []
+            if classname not in gt:
+                gt[classname] = {}
+            if img_id not in gt[classname]:
+                gt[classname][img_id] = []
+            pred[classname][img_id].append((bbox, score))
+    for img_id in gt_all.keys():
+        for classname, bbox in gt_all[img_id]:
+            if classname not in gt:
+                gt[classname] = {}
+            if img_id not in gt[classname]:
+                gt[classname][img_id] = []
+            gt[classname][img_id].append(bbox)
+
+    rec = {}
+    prec = {}
+    ap = {}
+    p = Pool(processes=10)
+    ret_values = p.map(
+        eval_det_cls_wrapper,
+        [(pred[classname], gt[classname], ovthresh, use_07_metric,
+          get_iou_func) for classname in gt.keys() if classname in pred],
+    )
+    p.close()
+    # ret_values only covers classes that appear in pred, so track its index
+    # separately instead of reusing the position within gt.keys()
+    ret_idx = 0
+    for classname in gt.keys():
+        if classname in pred:
+            rec[classname], prec[classname], ap[classname] = \
+                ret_values[ret_idx]
+            ret_idx += 1
+        else:
+            rec[classname] = 0
+            prec[classname] = 0
+            ap[classname] = 0
+        # print(classname, ap[classname])
+
+    return rec, prec, ap
diff --git a/models/LL3DA/utils/io.py b/models/LL3DA/utils/io.py
new file mode 100644
index 0000000..0f6c47a
--- /dev/null
+++ b/models/LL3DA/utils/io.py
@@ -0,0 +1,64 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
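+#
+# A minimal usage sketch for the two helpers defined below. The names
+# `model`, `optimizer`, `args` and `ckpt_dir` are placeholders from a
+# hypothetical training script; they are not defined in this module:
+#
+#     epoch, best_val_metrics = resume_if_possible(ckpt_dir, model, optimizer)
+#     for ep in range(epoch + 1, args.max_epoch):
+#         ...  # train one epoch
+#         save_checkpoint(ckpt_dir, model, optimizer, ep, args,
+#                         best_val_metrics, filename='checkpoint.pth')
+#
+# Saving under the fixed name 'checkpoint.pth' is what allows
+# `resume_if_possible` to find the latest state on restart.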
+
+import os
+
+import torch
+from utils.dist import is_primary
+
+
+def save_checkpoint(
+    checkpoint_dir,
+    model_no_ddp,
+    optimizer,
+    epoch,
+    args,
+    best_val_metrics,
+    filename=None,
+):
+    if not is_primary():
+        return
+    if filename is None:
+        filename = f'checkpoint_{epoch:04d}.pth'
+    checkpoint_name = os.path.join(checkpoint_dir, filename)
+
+    weight_ckpt = model_no_ddp.state_dict()
+    parameter_names = list(weight_ckpt.keys())
+    for name in parameter_names:
+        if args.filter_name is not None and args.filter_name in name:
+            weight_ckpt.pop(name)
+
+    sd = {
+        'model': weight_ckpt,
+        'optimizer': optimizer.state_dict(),
+        'epoch': epoch,
+        'args': args,
+        'best_val_metrics': best_val_metrics,
+    }
+    torch.save(sd, checkpoint_name)
+
+
+def resume_if_possible(checkpoint_dir, model_no_ddp, optimizer):
+    """Resume from `checkpoint.pth` in `checkpoint_dir` if it exists.
+
+    Returns the epoch of the loaded checkpoint (-1 if no checkpoint was
+    found) and the best validation metrics recorded so far.
+    """
+    epoch = -1
+    best_val_metrics = {}
+    if not os.path.isdir(checkpoint_dir):
+        return epoch, best_val_metrics
+
+    last_checkpoint = os.path.join(checkpoint_dir, 'checkpoint.pth')
+    if not os.path.isfile(last_checkpoint):
+        return epoch, best_val_metrics
+
+    sd = torch.load(last_checkpoint, map_location=torch.device('cpu'))
+    epoch = sd['epoch']
+    best_val_metrics = sd['best_val_metrics']
+    print(f'Found checkpoint at {epoch}. Resuming.')
+
+    model_no_ddp.load_state_dict(sd['model'], strict=False)
+    optimizer.load_state_dict(sd['optimizer'])
+    print(
+        f'Loaded model and optimizer state at {epoch}. Loaded best val metrics so far.'
+    )
+    return epoch, best_val_metrics
diff --git a/models/LL3DA/utils/logger.py b/models/LL3DA/utils/logger.py
new file mode 100644
index 0000000..177a212
--- /dev/null
+++ b/models/LL3DA/utils/logger.py
@@ -0,0 +1,32 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import torch
+
+try:
+    from tensorboardX import SummaryWriter
+except ImportError:
+    print('Cannot import tensorboard. Will log to txt files only.')
+    SummaryWriter = None
+
+from utils.dist import is_primary
+
+
+class Logger(object):
+
+    def __init__(self, log_dir=None) -> None:
+        self.log_dir = log_dir
+        if SummaryWriter is not None and is_primary():
+            self.writer = SummaryWriter(self.log_dir)
+        else:
+            self.writer = None
+
+    def log_scalars(self, scalar_dict, step, prefix=None):
+        if self.writer is None:
+            return
+        for k in scalar_dict:
+            v = scalar_dict[k]
+            if isinstance(v, torch.Tensor):
+                v = v.detach().cpu().item()
+            if prefix is not None:
+                k = prefix + k
+            self.writer.add_scalar(k, v, step)
diff --git a/models/LL3DA/utils/misc.py b/models/LL3DA/utils/misc.py
new file mode 100644
index 0000000..3f2ebe2
--- /dev/null
+++ b/models/LL3DA/utils/misc.py
@@ -0,0 +1,102 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
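+#
+# A minimal usage sketch for the `SmoothedValue` tracker defined below
+# (the loss values are illustrative):
+#
+#     meter = SmoothedValue(window_size=20)
+#     for loss in (0.9, 0.7, 0.8):
+#         meter.update(loss)
+#     print(meter)  # '0.8000 (0.8000)' -> 'median (global_avg)'
+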
+from collections import deque +from typing import List + +import numpy as np +import torch +from utils.dist import all_reduce_sum, barrier, is_distributed + + +def my_worker_init_fn(worker_id): + np.random.seed(np.random.get_state()[1][0] + worker_id) + + +@torch.jit.ignore +def to_list_1d(arr) -> List[float]: + arr = arr.detach().cpu().numpy().tolist() + return arr + + +@torch.jit.ignore +def to_list_3d(arr) -> List[List[List[float]]]: + arr = arr.detach().cpu().numpy().tolist() + return arr + + +def huber_loss(error, delta=1.0): + """ + Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py + x = error = pred - gt or dist(pred,gt) + 0.5 * |x|^2 if |x|<=d + 0.5 * d^2 + d * (|x|-d) if |x|>d + """ + abs_error = torch.abs(error) + quadratic = torch.clamp(abs_error, max=delta) + linear = abs_error - quadratic + loss = 0.5 * quadratic**2 + delta * linear + return loss + + +# From https://github.com/facebookresearch/detr/blob/master/util/misc.py +class SmoothedValue(object): + """Track a series of values and provide access to smoothed values over a + window or the global series average.""" + + def __init__(self, window_size=20, fmt=None): + if fmt is None: + fmt = '{median:.4f} ({global_avg:.4f})' + self.deque = deque(maxlen=window_size) + self.total = 0.0 + self.count = 0 + self.fmt = fmt + + def update(self, value, n=1): + self.deque.append(value) + self.count += n + self.total += value * n + + def synchronize_between_processes(self): + """ + Warning: does not synchronize the deque! + """ + if not is_distributed(): + return + t = torch.tensor([self.count, self.total], + dtype=torch.float64, + device='cuda') + barrier() + all_reduce_sum(t) + t = t.tolist() + self.count = int(t[0]) + self.total = t[1] + + @property + def median(self): + d = torch.tensor(list(self.deque)) + return d.median().item() + + @property + def avg(self): + d = torch.tensor(list(self.deque), dtype=torch.float32) + return d.mean().item() + + @property + def global_avg(self): + return self.total / self.count + + @property + def max(self): + return max(self.deque) + + @property + def value(self): + return self.deque[-1] + + def __str__(self): + return self.fmt.format( + median=self.median, + avg=self.avg, + global_avg=self.global_avg, + max=self.max, + value=self.value, + ) diff --git a/models/LL3DA/utils/nms.py b/models/LL3DA/utils/nms.py new file mode 100644 index 0000000..107b69b --- /dev/null +++ b/models/LL3DA/utils/nms.py @@ -0,0 +1,162 @@ +# Copyright (c) Facebook, Inc. and its affiliates. 
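+#
+# A minimal usage sketch for `nms_2d_faster` below (the boxes are
+# illustrative; each row is (x1, y1, x2, y2, score)):
+#
+#     import numpy as np
+#     boxes = np.array([[0.0, 0.0, 2.0, 2.0, 0.9],
+#                       [0.0, 0.0, 2.0, 1.9, 0.8]])
+#     keep = nms_2d_faster(boxes, overlap_threshold=0.5)
+#     # -> [0]: the lower-scoring box overlaps the first (IoU = 0.95)
+#     #    and is suppressed.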
+
+import numpy as np
+
+# boxes are axis aligned 2D boxes of shape (n,5) in FLOAT numbers with (x1,y1,x2,y2,score)
+""" Ref: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
+Ref: https://github.com/vickyboy47/nms-python/blob/master/nms.py
+"""
+
+
+def nms_2d(boxes, overlap_threshold):
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    x2 = boxes[:, 2]
+    y2 = boxes[:, 3]
+    score = boxes[:, 4]
+    area = (x2 - x1) * (y2 - y1)
+
+    I = np.argsort(score)
+    pick = []
+    while I.size != 0:
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+        suppress = [last - 1]
+        for pos in range(last - 1):
+            j = I[pos]
+            xx1 = max(x1[i], x1[j])
+            yy1 = max(y1[i], y1[j])
+            xx2 = min(x2[i], x2[j])
+            yy2 = min(y2[i], y2[j])
+            w = xx2 - xx1
+            h = yy2 - yy1
+            if w > 0 and h > 0:
+                o = w * h / area[j]
+                print('Overlap is', o)
+                if o > overlap_threshold:
+                    suppress.append(pos)
+        I = np.delete(I, suppress)
+    return pick
+
+
+def nms_2d_faster(boxes, overlap_threshold, old_type=False):
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    x2 = boxes[:, 2]
+    y2 = boxes[:, 3]
+    score = boxes[:, 4]
+    area = (x2 - x1) * (y2 - y1)
+
+    I = np.argsort(score)
+    pick = []
+    while I.size != 0:
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+
+        xx1 = np.maximum(x1[i], x1[I[:last - 1]])
+        yy1 = np.maximum(y1[i], y1[I[:last - 1]])
+        xx2 = np.minimum(x2[i], x2[I[:last - 1]])
+        yy2 = np.minimum(y2[i], y2[I[:last - 1]])
+
+        w = np.maximum(0, xx2 - xx1)
+        h = np.maximum(0, yy2 - yy1)
+
+        if old_type:
+            o = (w * h) / area[I[:last - 1]]
+        else:
+            inter = w * h
+            o = inter / (area[i] + area[I[:last - 1]] - inter)
+
+        I = np.delete(
+            I, np.concatenate(
+                ([last - 1], np.where(o > overlap_threshold)[0])))
+
+    return pick
+
+
+def nms_3d_faster(boxes, overlap_threshold, old_type=False):
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    z1 = boxes[:, 2]
+    x2 = boxes[:, 3]
+    y2 = boxes[:, 4]
+    z2 = boxes[:, 5]
+    score = boxes[:, 6]
+    area = (x2 - x1) * (y2 - y1) * (z2 - z1)
+
+    I = np.argsort(score)
+    pick = []
+    while I.size != 0:
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+
+        xx1 = np.maximum(x1[i], x1[I[:last - 1]])
+        yy1 = np.maximum(y1[i], y1[I[:last - 1]])
+        zz1 = np.maximum(z1[i], z1[I[:last - 1]])
+        xx2 = np.minimum(x2[i], x2[I[:last - 1]])
+        yy2 = np.minimum(y2[i], y2[I[:last - 1]])
+        zz2 = np.minimum(z2[i], z2[I[:last - 1]])
+
+        l = np.maximum(0, xx2 - xx1)
+        w = np.maximum(0, yy2 - yy1)
+        h = np.maximum(0, zz2 - zz1)
+
+        if old_type:
+            o = (l * w * h) / area[I[:last - 1]]
+        else:
+            inter = l * w * h
+            o = inter / (area[i] + area[I[:last - 1]] - inter)
+
+        I = np.delete(
+            I, np.concatenate(
+                ([last - 1], np.where(o > overlap_threshold)[0])))
+
+    return pick
+
+
+def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False):
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    z1 = boxes[:, 2]
+    x2 = boxes[:, 3]
+    y2 = boxes[:, 4]
+    z2 = boxes[:, 5]
+    score = boxes[:, 6]
+    cls = boxes[:, 7]
+    area = (x2 - x1) * (y2 - y1) * (z2 - z1)
+
+    I = np.argsort(score)
+    pick = []
+    while I.size != 0:
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+
+        xx1 = np.maximum(x1[i], x1[I[:last - 1]])
+        yy1 = np.maximum(y1[i], y1[I[:last - 1]])
+        zz1 = np.maximum(z1[i], z1[I[:last - 1]])
+        xx2 = np.minimum(x2[i], x2[I[:last - 1]])
+        yy2 = np.minimum(y2[i], y2[I[:last - 1]])
+        zz2 = np.minimum(z2[i], z2[I[:last - 1]])
+        cls1 = cls[i]
+        cls2 = cls[I[:last - 1]]
+
+        l = np.maximum(0, xx2 - xx1)
+        w = np.maximum(0, yy2 - yy1)
+        h = np.maximum(0, zz2 - zz1)
+
+        if old_type:
+            o = (l * w * h) / area[I[:last - 1]]
+        else:
+            inter = l * w * h
+            o = inter / (area[i] +
area[I[:last - 1]] - inter)
+            o = o * (cls1 == cls2)
+
+        I = np.delete(
+            I, np.concatenate(
+                ([last - 1], np.where(o > overlap_threshold)[0])))
+
+    return pick
diff --git a/models/LL3DA/utils/pc_util.py b/models/LL3DA/utils/pc_util.py
new file mode 100644
index 0000000..3ef9860
--- /dev/null
+++ b/models/LL3DA/utils/pc_util.py
@@ -0,0 +1,294 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""Utility functions for processing point clouds.
+
+Author: Charles R. Qi and Or Litany
+"""
+
+import os
+import sys
+
+# Point cloud IO
+import numpy as np
+import torch
+# Mesh IO
+import trimesh
+from plyfile import PlyData, PlyElement
+
+# ----------------------------------------
+# Point Cloud Sampling
+# ----------------------------------------
+
+
+def random_sampling(pc, num_sample, replace=None, return_choices=False):
+    """Input is NxC, output is num_sample x C."""
+    if replace is None:
+        replace = pc.shape[0] < num_sample
+    choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
+    if return_choices:
+        return pc[choices], choices
+    else:
+        return pc[choices]
+
+
+# ----------------------------------------
+# Simple Point manipulations
+# ----------------------------------------
+def shift_scale_points(pred_xyz, src_range, dst_range=None):
+    """
+    pred_xyz: B x N x 3
+    src_range: [[B x 3], [B x 3]] - min and max XYZ coords
+    dst_range: [[B x 3], [B x 3]] - min and max XYZ coords
+    """
+    if dst_range is None:
+        dst_range = [
+            torch.zeros((src_range[0].shape[0], 3),
+                        device=src_range[0].device),
+            torch.ones((src_range[0].shape[0], 3), device=src_range[0].device),
+        ]
+
+    if pred_xyz.ndim == 4:
+        src_range = [x[:, None] for x in src_range]
+        dst_range = [x[:, None] for x in dst_range]
+
+    assert src_range[0].shape[0] == pred_xyz.shape[0]
+    assert dst_range[0].shape[0] == pred_xyz.shape[0]
+    assert src_range[0].shape[-1] == pred_xyz.shape[-1]
+    assert src_range[0].shape == src_range[1].shape
+    assert dst_range[0].shape == dst_range[1].shape
+    assert src_range[0].shape == dst_range[1].shape
+
+    src_diff = src_range[1][:, None, :] - src_range[0][:, None, :]
+    dst_diff = dst_range[1][:, None, :] - dst_range[0][:, None, :]
+    prop_xyz = (((pred_xyz - src_range[0][:, None, :]) * dst_diff) /
+                src_diff) + dst_range[0][:, None, :]
+    return prop_xyz
+
+
+def scale_points(pred_xyz, mult_factor):
+    if pred_xyz.ndim == 4:
+        mult_factor = mult_factor[:, None]
+    scaled_xyz = pred_xyz * mult_factor[:, None, :]
+    return scaled_xyz
+
+
+def rotate_point_cloud(points, rotation_matrix=None):
+    """Input: (n,3), Output: (n,3)"""
+    # Rotate around the Z axis, about the cloud's centroid.
+    if rotation_matrix is None:
+        rotation_angle = np.random.uniform() * 2 * np.pi
+        sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle)
+        rotation_matrix = np.array([[cosval, sinval, 0], [-sinval, cosval, 0],
+                                    [0, 0, 1]])
+    ctr = points.mean(axis=0)
+    rotated_data = np.dot(points - ctr, rotation_matrix) + ctr
+    return rotated_data, rotation_matrix
+
+
+def rotate_pc_along_y(pc, rot_angle):
+    """Input pc is NxC points with the first 3 channels as XYZ.
+
+    z is facing forward, x is leftward, y is downward.
+    """
+    cosval = np.cos(rot_angle)
+    sinval = np.sin(rot_angle)
+    rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
+    pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
+    return pc
+
+
+def roty(t):
+    """Rotation about the y-axis."""
+    c = np.cos(t)
+    s = np.sin(t)
+    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
+
+
+def roty_batch(t):
+    """Rotation about the y-axis.
+ + t: (x1,x2,...xn) + return: (x1,x2,...,xn,3,3) + """ + input_shape = t.shape + output = np.zeros(tuple(list(input_shape) + [3, 3])) + c = np.cos(t) + s = np.sin(t) + output[..., 0, 0] = c + output[..., 0, 2] = s + output[..., 1, 1] = 1 + output[..., 2, 0] = -s + output[..., 2, 2] = c + return output + + +def rotz(t): + """Rotation about the z-axis.""" + c = np.cos(t) + s = np.sin(t) + return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]]) + + +def point_cloud_to_bbox(points): + """Extract the axis aligned box from a pcl or batch of pcls + Args: + points: Nx3 points or BxNx3 + output is 6 dim: xyz pos of center and 3 lengths + """ + which_dim = len( + points.shape) - 2 # first dim if a single cloud and second if batch + mn, mx = points.min(which_dim), points.max(which_dim) + lengths = mx - mn + cntr = 0.5 * (mn + mx) + return np.concatenate([cntr, lengths], axis=which_dim) + + +def write_bbox(scene_bbox, out_filename): + """Export scene bbox to meshes + Args: + scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths + out_filename: (string) filename + + Note: + To visualize the boxes in MeshLab. + 1. Select the objects (the boxes) + 2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh + 3. Select Wireframe view. + """ + + def convert_box_to_trimesh_fmt(box): + ctr = box[:3] + lengths = box[3:] + trns = np.eye(4) + trns[0:3, 3] = ctr + trns[3, 3] = 1.0 + box_trimesh_fmt = trimesh.creation.box(lengths, trns) + return box_trimesh_fmt + + scene = trimesh.scene.Scene() + for box in scene_bbox: + scene.add_geometry(convert_box_to_trimesh_fmt(box)) + + mesh_list = trimesh.util.concatenate(scene.dump()) + # save to ply file + trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply') + + return + + +def write_oriented_bbox(scene_bbox, out_filename, colors=None): + """Export oriented (around Z axis) scene bbox to meshes + Args: + scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz) + and heading angle around Z axis. + Y forward, X right, Z upward. heading angle of positive X is 0, + heading angle of positive Y is 90 degrees. 
+        out_filename: (string) filename
+    """
+
+    def heading2rotmat(heading_angle):
+        rotmat = np.zeros((3, 3))
+        rotmat[2, 2] = 1
+        cosval = np.cos(heading_angle)
+        sinval = np.sin(heading_angle)
+        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
+        return rotmat
+
+    def convert_oriented_box_to_trimesh_fmt(box):
+        ctr = box[:3]
+        lengths = box[3:6]
+        trns = np.eye(4)
+        trns[0:3, 3] = ctr
+        trns[3, 3] = 1.0
+        trns[0:3, 0:3] = heading2rotmat(box[6])
+        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
+        return box_trimesh_fmt
+
+    if colors is not None:
+        if colors.shape[0] != len(scene_bbox):
+            colors = [colors for _ in range(len(scene_bbox))]
+            colors = np.array(colors).astype(np.uint8)
+        assert colors.shape[0] == len(scene_bbox)
+        assert colors.shape[1] == 4
+
+    scene = trimesh.scene.Scene()
+    for idx, box in enumerate(scene_bbox):
+        box_tr = convert_oriented_box_to_trimesh_fmt(box)
+        if colors is not None:
+            box_tr.visual.main_color[:] = colors[idx]
+            box_tr.visual.vertex_colors[:] = colors[idx]
+            for facet in box_tr.facets:
+                box_tr.visual.face_colors[facet] = colors[idx]
+        scene.add_geometry(box_tr)
+
+    mesh_list = trimesh.util.concatenate(scene.dump())
+    # save to ply file
+    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
+
+    return
+
+
+def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
+    """Export oriented (around Y axis) scene bbox to meshes
+    Args:
+        scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
+            and heading angle around Y axis.
+            Z forward, X rightward, Y downward. heading angle of positive X is 0,
+            heading angle of negative Z is 90 degrees.
+        out_filename: (string) filename
+    """

+    def heading2rotmat(heading_angle):
+        rotmat = np.zeros((3, 3))
+        rotmat[1, 1] = 1
+        cosval = np.cos(heading_angle)
+        sinval = np.sin(heading_angle)
+        rotmat[0, :] = np.array([cosval, 0, sinval])
+        rotmat[2, :] = np.array([-sinval, 0, cosval])
+        return rotmat
+
+    def convert_oriented_box_to_trimesh_fmt(box):
+        ctr = box[:3]
+        lengths = box[3:6]
+        trns = np.eye(4)
+        trns[0:3, 3] = ctr
+        trns[3, 3] = 1.0
+        trns[0:3, 0:3] = heading2rotmat(box[6])
+        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
+        return box_trimesh_fmt
+
+    scene = trimesh.scene.Scene()
+    for box in scene_bbox:
+        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
+
+    mesh_list = trimesh.util.concatenate(scene.dump())
+    # save to ply file
+    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
+
+    return
+
+
+def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
+    """Create lines represented as cylinders connecting pairs of 3D points
+    Args:
+        pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
+        filename: (string) filename for the output mesh (ply) file
+        rad: radius for the cylinder
+        res: number of sections used to create the cylinder
+    """
+    scene = trimesh.scene.Scene()
+    for src, tgt in pcl:
+        # compute line
+        vec = tgt - src
+        M = trimesh.geometry.align_vectors([0, 0, 1], vec, False)
+        vec = tgt - src  # compute again since align_vectors modifies vec in-place!
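+        # The transform from align_vectors rotates the unit Z axis onto the
+        # segment direction; the midpoint translation below centers the
+        # cylinder on the segment, and `height` stretches it to span the
+        # two endpoints.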
+ M[:3, 3] = 0.5 * src + 0.5 * tgt + height = np.sqrt(np.dot(vec, vec)) + scene.add_geometry( + trimesh.creation.cylinder(radius=rad, + height=height, + sections=res, + transform=M)) + mesh_list = trimesh.util.concatenate(scene.dump()) + trimesh.io.export.export_mesh(mesh_list, + '%s.ply' % (filename), + file_type='ply') diff --git a/models/LL3DA/utils/proposal_parser.py b/models/LL3DA/utils/proposal_parser.py new file mode 100644 index 0000000..68ca416 --- /dev/null +++ b/models/LL3DA/utils/proposal_parser.py @@ -0,0 +1,209 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +"""Helper functions and class to calculate Average Precisions for 3D object +detection.""" +import logging +import os +import sys +from collections import OrderedDict + +import numpy as np +import scipy.special as scipy_special +import torch +from utils.box_util import (extract_pc_in_box3d, flip_axis_to_camera_np, + get_3d_box, get_3d_box_batch) +from utils.eval_det import eval_det_multiprocessing, get_iou_obb +from utils.nms import nms_2d_faster, nms_3d_faster, nms_3d_faster_samecls + + +def flip_axis_to_depth(pc): + pc2 = np.copy(pc) + pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # depth X,Y,Z = cam X,Z,-Y + pc2[..., 2] *= -1 + return pc2 + + +def get_ap_config_dict( + remove_empty_box=True, + use_3d_nms=True, + nms_iou=0.25, + use_old_type_nms=False, + cls_nms=True, + per_class_proposal=True, + use_cls_confidence_only=False, + conf_thresh=0.05, + no_nms=False, + dataset_config=None, +): + """Default mAP evaluation settings for VoteNet.""" + + config_dict = { + 'remove_empty_box': remove_empty_box, + 'use_3d_nms': use_3d_nms, + 'nms_iou': nms_iou, + 'use_old_type_nms': use_old_type_nms, + 'cls_nms': cls_nms, + 'per_class_proposal': per_class_proposal, + 'use_cls_confidence_only': use_cls_confidence_only, + 'conf_thresh': conf_thresh, + 'no_nms': no_nms, + 'dataset_config': dataset_config, + } + return config_dict + + +def ez_extract_pc_in_box3d(pc, box3d): + box_min = box3d.min(0) # 3 + box_max = box3d.max(0) # 3 + per_axis_mask = np.hstack( + (pc > box_min.reshape(1, 3), pc < box_max.reshape(1, 3))) + per_point_mask = per_axis_mask.sum(-1) == 6 + pc_in_box = pc[per_point_mask] + inds = np.arange(len(pc))[per_point_mask] + return pc_in_box, inds + + +# This is exactly the same as VoteNet so that we can compare evaluations. +def parse_predictions(predicted_boxes, + sem_cls_probs, + objectness_probs, + point_cloud, + config_dict=get_ap_config_dict()): + """Parse predictions to OBB parameters and suppress overlapping boxes. 
+
+    Args:
+        predicted_boxes: (B, K, 8, 3) tensor of predicted box corners
+        sem_cls_probs: (B, K, num_cls) tensor of semantic class probabilities
+        objectness_probs: (B, K) tensor of objectness probabilities
+        point_cloud: (B, N, 3+C) tensor of the input point clouds
+        config_dict: dict
+            {dataset_config, remove_empty_box, use_3d_nms, nms_iou,
+            use_old_type_nms, conf_thresh, per_class_proposal}
+
+    Returns:
+        pred_mask: (B, K) numpy array with 1 for boxes that survive the
+        empty-box filtering and NMS, and 0 for suppressed boxes.
+    """
+
+    sem_cls_probs = sem_cls_probs.detach().cpu().numpy()  # B,num_proposal,10
+    pred_sem_cls_prob = np.max(sem_cls_probs, -1)  # B,num_proposal
+    pred_sem_cls = np.argmax(sem_cls_probs, -1)
+    obj_prob = objectness_probs.detach().cpu().numpy()
+
+    pred_corners_3d_upright_camera = predicted_boxes.detach().cpu().numpy()
+
+    K = pred_corners_3d_upright_camera.shape[1]  # K==num_proposal
+    bsize = pred_corners_3d_upright_camera.shape[0]
+    nonempty_box_mask = np.ones((bsize, K))
+
+    if config_dict['remove_empty_box']:
+        # -------------------------------------
+        # Remove predicted boxes without any point within them.
+        batch_pc = point_cloud.cpu().numpy()[:, :, 0:3]  # B,N,3
+        for i in range(bsize):
+            pc = batch_pc[i, :, :]  # (N,3)
+            for j in range(K):
+                box3d = pred_corners_3d_upright_camera[i, j, :, :]  # (8,3)
+                box3d = flip_axis_to_depth(box3d)
+                # pc_in_box, inds = extract_pc_in_box3d(pc, box3d)
+                pc_in_box, inds = ez_extract_pc_in_box3d(pc, box3d)
+                if len(pc_in_box) < 5:
+                    nonempty_box_mask[i, j] = 0
+            if nonempty_box_mask[i].sum() == 0:
+                nonempty_box_mask[i, obj_prob[i].argmax()] = 1
+        # -------------------------------------
+
+    if 'no_nms' in config_dict and config_dict['no_nms']:
+        # pred_mask = np.ones((bsize, K))
+        pred_mask = nonempty_box_mask
+    elif not config_dict['use_3d_nms']:
+        # ---------- NMS input: pred_with_prob in (B,K,7) -----------
+        pred_mask = np.zeros((bsize, K))
+        for i in range(bsize):
+            boxes_2d_with_prob = np.zeros((K, 5))
+            for j in range(K):
+                boxes_2d_with_prob[j, 0] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_2d_with_prob[j, 2] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_2d_with_prob[j, 1] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_2d_with_prob[j, 3] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_2d_with_prob[j, 4] = obj_prob[i, j]
+            nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0]
+            assert len(nonempty_box_inds) > 0
+            pick = nms_2d_faster(
+                boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :],
+                config_dict['nms_iou'],
+                config_dict['use_old_type_nms'],
+            )
+            assert len(pick) > 0
+            pred_mask[i, nonempty_box_inds[pick]] = 1
+        # ---------- NMS output: pred_mask in (B,K) -----------
+    elif config_dict['use_3d_nms'] and (not config_dict['cls_nms']):
+        # ---------- NMS input: pred_with_prob in (B,K,7) -----------
+        pred_mask = np.zeros((bsize, K))
+        for i in range(bsize):
+            boxes_3d_with_prob = np.zeros((K, 7))
+            for j in range(K):
+                boxes_3d_with_prob[j, 0] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_3d_with_prob[j, 1] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 1])
+                boxes_3d_with_prob[j, 2] = np.min(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_3d_with_prob[j, 3] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 0])
+                boxes_3d_with_prob[j, 4] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 1])
+                boxes_3d_with_prob[j, 5] = np.max(
+                    pred_corners_3d_upright_camera[i, j, :, 2])
+                boxes_3d_with_prob[j, 6] = obj_prob[i, j]
+
nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0] + assert len(nonempty_box_inds) > 0 + pick = nms_3d_faster( + boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :], + config_dict['nms_iou'], + config_dict['use_old_type_nms'], + ) + assert len(pick) > 0 + pred_mask[i, nonempty_box_inds[pick]] = 1 + # ---------- NMS output: pred_mask in (B,K) ----------- + elif config_dict['use_3d_nms'] and config_dict['cls_nms']: + # ---------- NMS input: pred_with_prob in (B,K,8) ----------- + pred_mask = np.zeros((bsize, K)) + for i in range(bsize): + boxes_3d_with_prob = np.zeros((K, 8)) + for j in range(K): + boxes_3d_with_prob[j, 0] = np.min( + pred_corners_3d_upright_camera[i, j, :, 0]) + boxes_3d_with_prob[j, 1] = np.min( + pred_corners_3d_upright_camera[i, j, :, 1]) + boxes_3d_with_prob[j, 2] = np.min( + pred_corners_3d_upright_camera[i, j, :, 2]) + boxes_3d_with_prob[j, 3] = np.max( + pred_corners_3d_upright_camera[i, j, :, 0]) + boxes_3d_with_prob[j, 4] = np.max( + pred_corners_3d_upright_camera[i, j, :, 1]) + boxes_3d_with_prob[j, 5] = np.max( + pred_corners_3d_upright_camera[i, j, :, 2]) + boxes_3d_with_prob[j, 6] = obj_prob[i, j] + boxes_3d_with_prob[j, 7] = pred_sem_cls[ + i, + j] # only suppress if the two boxes are of the same class!! + nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0] + assert len(nonempty_box_inds) > 0 + pick = nms_3d_faster_samecls( + boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :], + config_dict['nms_iou'], + config_dict['use_old_type_nms'], + ) + assert len(pick) > 0 + pred_mask[i, nonempty_box_inds[pick]] = 1 + # ---------- NMS output: pred_mask in (B,K) ----------- + + return pred_mask diff --git a/models/LL3DA/utils/random_cuboid.py b/models/LL3DA/utils/random_cuboid.py new file mode 100644 index 0000000..b674983 --- /dev/null +++ b/models/LL3DA/utils/random_cuboid.py @@ -0,0 +1,97 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +import numpy as np + + +def check_aspect(crop_range, aspect_min): + xy_aspect = np.min(crop_range[:2]) / np.max(crop_range[:2]) + xz_aspect = np.min(crop_range[[0, 2]]) / np.max(crop_range[[0, 2]]) + yz_aspect = np.min(crop_range[1:]) / np.max(crop_range[1:]) + return ((xy_aspect >= aspect_min) or (xz_aspect >= aspect_min) + or (yz_aspect >= aspect_min)) + + +class RandomCuboid(object): + """RandomCuboid augmentation from DepthContrast. + + [https://arxiv.org/abs/2101.02691] We slightly modify this operation to + account for object detection. 
+
+    This augmentation randomly crops a cuboid from the input and ensures
+    that the cropped cuboid contains at least one bounding box.
+    """
+
+    def __init__(
+        self,
+        min_points,
+        aspect=0.8,
+        min_crop=0.5,
+        max_crop=1.0,
+        box_filter_policy='center',
+    ):
+        self.aspect = aspect
+        self.min_crop = min_crop
+        self.max_crop = max_crop
+        self.min_points = min_points
+        self.box_filter_policy = box_filter_policy
+
+    def __call__(self, point_cloud, target_boxes, per_point_labels=None):
+        range_xyz = np.max(point_cloud[:, 0:3], axis=0) - np.min(
+            point_cloud[:, 0:3], axis=0)
+
+        for _ in range(100):
+            crop_range = self.min_crop + np.random.rand(3) * (self.max_crop -
+                                                              self.min_crop)
+            if not check_aspect(crop_range, self.aspect):
+                continue
+
+            sample_center = point_cloud[np.random.choice(len(point_cloud)),
+                                        0:3]
+
+            new_range = range_xyz * crop_range / 2.0
+
+            max_xyz = sample_center + new_range
+            min_xyz = sample_center - new_range
+
+            upper_idx = (np.sum(
+                (point_cloud[:, 0:3] <= max_xyz).astype(np.int32), 1) == 3)
+            lower_idx = (np.sum(
+                (point_cloud[:, 0:3] >= min_xyz).astype(np.int32), 1) == 3)
+
+            new_pointidx = (upper_idx) & (lower_idx)
+
+            if np.sum(new_pointidx) < self.min_points:
+                continue
+
+            new_point_cloud = point_cloud[new_pointidx, :]
+
+            # filtering policy is the only modification from DepthContrast
+            if self.box_filter_policy == 'center':
+                # remove boxes whose center does not lie within the
+                # new_point_cloud
+                new_boxes = target_boxes
+                if target_boxes.sum() > 0:
+                    # only filter when the ground truth actually contains
+                    # boxes; scenes without boxes are common in SUNRGBD
+                    box_centers = target_boxes[:, 0:3]
+                    new_pc_min_max = np.min(new_point_cloud[:, 0:3],
+                                            axis=0), np.max(
+                                                new_point_cloud[:, 0:3],
+                                                axis=0)
+                    keep_boxes = np.logical_and(
+                        np.all(box_centers >= new_pc_min_max[0], axis=1),
+                        np.all(box_centers <= new_pc_min_max[1], axis=1),
+                    )
+                    if keep_boxes.sum() == 0:
+                        # current data augmentation removes all boxes in the
+                        # pointcloud. fail!
+                        continue
+                    new_boxes = target_boxes[keep_boxes]
+                if per_point_labels is not None:
+                    new_per_point_labels = [
+                        x[new_pointidx] for x in per_point_labels
+                    ]
+                else:
+                    new_per_point_labels = None
+                # if we are here, all conditions are met. return boxes
+                return new_point_cloud, new_boxes, new_per_point_labels
+
+        # fallback
+        return point_cloud, target_boxes, per_point_labels
diff --git a/models/README.md b/models/README.md
new file mode 100644
index 0000000..5309e7b
--- /dev/null
+++ b/models/README.md
@@ -0,0 +1,121 @@
+## 3D Visual Grounding Models
+
+These are 3D visual grounding models adapted for the mmscan-devkit. Currently, two models have been released: EmbodiedScan and ScanRefer.
+
+### ScanRefer
+
+1. Follow the [ScanRefer](https://github.com/daveredrum/ScanRefer/blob/master/README.md) instructions to set up the environment. For data preparation, you do not need to download the full datasets; you only need to download the [preprocessed GloVe embeddings](https://kaldir.vc.in.tum.de/glove.p) (~990MB) and put them under `data/`.
+
+2. Install the MMScan API.
+
+3. Overwrite `CONF.PATH.OUTPUT` in `lib/config.py` with your desired output directory.
+
+4. Run the following command to train ScanRefer (single GPU):
+
+   ```bash
+   python -u scripts/train.py --use_color --epoch {10/25/50}
+   ```
+
+5. Run the following command to evaluate ScanRefer (single GPU):
+
+   ```bash
+   python -u scripts/train.py --use_color --eval_only --use_checkpoint "path/to/pth"
+   ```
+
+### EmbodiedScan
+
+1. Follow the [EmbodiedScan](https://github.com/OpenRobotLab/EmbodiedScan/blob/main/README.md) instructions to set up the environment.
Download the [Multi-View 3D Detection model's weights](https://download.openmmlab.com/mim-example/embodiedscan/mv-3ddet.pth) and change the "load_from" path in the config file under `configs/grounding` to the path where the weights are saved.
+
+2. Install the MMScan API.
+
+3. Run the following command to train EmbodiedScan (single or multiple GPUs):
+
+   ```bash
+   # Single GPU training
+   python tools/train.py configs/grounding/pcd_4xb24_mmscan_vg_num256.py --work-dir=path/to/save
+
+   # Multiple GPU training
+   python tools/train.py configs/grounding/pcd_4xb24_mmscan_vg_num256.py --work-dir=path/to/save --launcher="pytorch"
+   ```
+
+4. Run the following command to evaluate EmbodiedScan (single or multiple GPUs):
+
+   ```bash
+   # Single GPU testing
+   python tools/test.py configs/grounding/pcd_4xb24_mmscan_vg_num256.py path/to/load_pth
+
+   # Multiple GPU testing
+   python tools/test.py configs/grounding/pcd_4xb24_mmscan_vg_num256.py path/to/load_pth --launcher="pytorch"
+   ```
+
+## 3D Question Answering Models
+
+These are 3D question answering models adapted for the mmscan-devkit. Currently, two models have been released: LL3DA and LEO.
+
+### LL3DA
+
+1. Follow the [LL3DA](https://github.com/Open3DA/LL3DA/blob/main/README.md) instructions to set up the environment. For data preparation, you do not need to download the full datasets; you only need to:
+
+   (1) Download the [released pre-trained weights](https://huggingface.co/CH3COOK/LL3DA-weight-release/blob/main/ll3da-opt-1.3b.pth) and put them under `./pretrained`.
+
+   (2) Download the [pre-processed BERT embedding weights](https://huggingface.co/CH3COOK/bert-base-embedding/tree/main) and store them under the `./bert-base-embedding` folder.
+
+2. Install the MMScan API.
+
+3. Edit the configs under `./scripts/opt-1.3b/eval.mmscanqa.sh` and `./scripts/opt-1.3b/tuning.mmscanqa.sh`.
+
+4. Run the following command to train LL3DA (4 GPUs):
+
+   ```bash
+   bash scripts/opt-1.3b/tuning.mmscanqa.sh
+   ```
+
+5. Run the following command to evaluate LL3DA (4 GPUs):
+
+   ```bash
+   bash scripts/opt-1.3b/eval.mmscanqa.sh
+   ```
+
+   Optional: after obtaining the results, you can run the GPT evaluator.
+   'qa_pred_gt_val.json' will be generated under the checkpoint folder after evaluation, and the tmp_path is used for temporary storage.
+
+   ```bash
+   python eval_utils/evaluate_gpt.py --file path/to/qa_pred_gt_val.json \
+       --tmp_path path/to/tmp --api_key your_api_key --eval_size -1 \
+       --nproc 4
+   ```
+
+### LEO
+
+1. Follow the [LEO](https://github.com/embodied-generalist/embodied-generalist/blob/main/README.md) instructions to set up the environment. For data preparation, you do not need to download the full datasets; you only need to:
+
+   (1) Download [Vicuna-7B](https://huggingface.co/huangjy-pku/vicuna-7b/tree/main) and update cfg_path in configs/llm/\*.yaml.
+
+   (2) Download the [sft_noact.pth](https://huggingface.co/datasets/huangjy-pku/LEO_data/tree/main) and store it under the `./weights` folder.
+
+2. Install the MMScan API.
+
+3. Edit the configs under `scripts/train_tuning_mmscan.sh` and `scripts/test_tuning_mmscan.sh`.
+
+4. Run the following command to train LEO (4 GPUs):
+
+   ```bash
+   bash scripts/train_tuning_mmscan.sh
+   ```
+
+5. Run the following command to evaluate LEO (4 GPUs):
+
+   ```bash
+   bash scripts/test_tuning_mmscan.sh
+   ```
+
+   Optional: after obtaining the results, you can run the GPT evaluator.
+   'test_embodied_scan_l_complete.json' will be generated under the checkpoint folder after evaluation, and the tmp_path is used for temporary storage.
+
+   ```bash
+   python evaluator/GPT_eval.py --file path/to/test_embodied_scan_l_complete.json \
+       --tmp_path path/to/tmp --api_key your_api_key --eval_size -1 \
+       --nproc 4
+   ```
+
+PS: LEO may encounter a "NaN" error in the MultiHeadAttentionSpatial module when trained for more epochs, due to the training setup (this does not occur for one epoch on 4 GPUs).
diff --git a/models/Scanrefer/LICENSE b/models/Scanrefer/LICENSE
new file mode 100644
index 0000000..8c21604
--- /dev/null
+++ b/models/Scanrefer/LICENSE
@@ -0,0 +1,83 @@
+ScanRefer
+Copyright (c) 2019 Dave Zhenyu Chen, Angel X. Chang, Matthias Nießner
+
+ScanRefer is licensed under a
+Creative Commons Attribution-NonCommercial-ShareAlike 3.0 Unported License.
+
+You should have received a copy of the license along with this
+work. If not, see <https://creativecommons.org/licenses/by-nc-sa/3.0/>.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+License
+
+THE WORK (AS DEFINED BELOW) IS PROVIDED UNDER THE TERMS OF THIS CREATIVE COMMONS PUBLIC LICENSE ("CCPL" OR "LICENSE"). THE WORK IS PROTECTED BY COPYRIGHT AND/OR OTHER APPLICABLE LAW. ANY USE OF THE WORK OTHER THAN AS AUTHORIZED UNDER THIS LICENSE OR COPYRIGHT LAW IS PROHIBITED.
+
+BY EXERCISING ANY RIGHTS TO THE WORK PROVIDED HERE, YOU ACCEPT AND AGREE TO BE BOUND BY THE TERMS OF THIS LICENSE. TO THE EXTENT THIS LICENSE MAY BE CONSIDERED TO BE A CONTRACT, THE LICENSOR GRANTS YOU THE RIGHTS CONTAINED HERE IN CONSIDERATION OF YOUR ACCEPTANCE OF SUCH TERMS AND CONDITIONS.
+
+1. Definitions
+
+"Adaptation" means a work based upon the Work, or upon the Work and other pre-existing works, such as a translation, adaptation, derivative work, arrangement of music or other alterations of a literary or artistic work, or phonogram or performance and includes cinematographic adaptations or any other form in which the Work may be recast, transformed, or adapted including in any form recognizably derived from the original, except that a work that constitutes a Collection will not be considered an Adaptation for the purpose of this License. For the avoidance of doubt, where the Work is a musical work, performance or phonogram, the synchronization of the Work in timed-relation with a moving image ("synching") will be considered an Adaptation for the purpose of this License.
+"Collection" means a collection of literary or artistic works, such as encyclopedias and anthologies, or performances, phonograms or broadcasts, or other works or subject matter other than works listed in Section 1(g) below, which, by reason of the selection and arrangement of their contents, constitute intellectual creations, in which the Work is included in its entirety in unmodified form along with one or more other contributions, each constituting separate and independent works in themselves, which together are assembled into a collective whole. A work that constitutes a Collection will not be considered an Adaptation (as defined above) for the purposes of this License.
+"Distribute" means to make available to the public the original and copies of the Work or Adaptation, as appropriate, through sale or other transfer of ownership.
+"License Elements" means the following high-level license attributes as selected by Licensor and indicated in the title of this License: Attribution, Noncommercial, ShareAlike. +"Licensor" means the individual, individuals, entity or entities that offer(s) the Work under the terms of this License. +"Original Author" means, in the case of a literary or artistic work, the individual, individuals, entity or entities who created the Work or if no individual or entity can be identified, the publisher; and in addition (i) in the case of a performance the actors, singers, musicians, dancers, and other persons who act, sing, deliver, declaim, play in, interpret or otherwise perform literary or artistic works or expressions of folklore; (ii) in the case of a phonogram the producer being the person or legal entity who first fixes the sounds of a performance or other sounds; and, (iii) in the case of broadcasts, the organization that transmits the broadcast. +"Work" means the literary and/or artistic work offered under the terms of this License including without limitation any production in the literary, scientific and artistic domain, whatever may be the mode or form of its expression including digital form, such as a book, pamphlet and other writing; a lecture, address, sermon or other work of the same nature; a dramatic or dramatico-musical work; a choreographic work or entertainment in dumb show; a musical composition with or without words; a cinematographic work to which are assimilated works expressed by a process analogous to cinematography; a work of drawing, painting, architecture, sculpture, engraving or lithography; a photographic work to which are assimilated works expressed by a process analogous to photography; a work of applied art; an illustration, map, plan, sketch or three-dimensional work relative to geography, topography, architecture or science; a performance; a broadcast; a phonogram; a compilation of data to the extent it is protected as a copyrightable work; or a work performed by a variety or circus performer to the extent it is not otherwise considered a literary or artistic work. +"You" means an individual or entity exercising rights under this License who has not previously violated the terms of this License with respect to the Work, or who has received express permission from the Licensor to exercise rights under this License despite a previous violation. +"Publicly Perform" means to perform public recitations of the Work and to communicate to the public those public recitations, by any means or process, including by wire or wireless means or public digital performances; to make available to the public Works in such a way that members of the public may access these Works from a place and at a place individually chosen by them; to perform the Work to the public by any means or process and the communication to the public of the performances of the Work, including by public digital performance; to broadcast and rebroadcast the Work by any means including signs, sounds or images. +"Reproduce" means to make copies of the Work by any means including without limitation by sound or visual recordings and the right of fixation and reproducing fixations of the Work, including storage of a protected performance or phonogram in digital form or other electronic medium. +2. Fair Dealing Rights. 
Nothing in this License is intended to reduce, limit, or restrict any uses free from copyright or rights arising from limitations or exceptions that are provided for in connection with the copyright protection under copyright law or other applicable laws. + +3. License Grant. Subject to the terms and conditions of this License, Licensor hereby grants You a worldwide, royalty-free, non-exclusive, perpetual (for the duration of the applicable copyright) license to exercise the rights in the Work as stated below: + +to Reproduce the Work, to incorporate the Work into one or more Collections, and to Reproduce the Work as incorporated in the Collections; +to create and Reproduce Adaptations provided that any such Adaptation, including any translation in any medium, takes reasonable steps to clearly label, demarcate or otherwise identify that changes were made to the original Work. For example, a translation could be marked "The original work was translated from English to Spanish," or a modification could indicate "The original work has been modified."; +to Distribute and Publicly Perform the Work including as incorporated in Collections; and, +to Distribute and Publicly Perform Adaptations. +The above rights may be exercised in all media and formats whether now known or hereafter devised. The above rights include the right to make such modifications as are technically necessary to exercise the rights in other media and formats. Subject to Section 8(f), all rights not expressly granted by Licensor are hereby reserved, including but not limited to the rights described in Section 4(e). + +4. Restrictions. The license granted in Section 3 above is expressly made subject to and limited by the following restrictions: + +You may Distribute or Publicly Perform the Work only under the terms of this License. You must include a copy of, or the Uniform Resource Identifier (URI) for, this License with every copy of the Work You Distribute or Publicly Perform. You may not offer or impose any terms on the Work that restrict the terms of this License or the ability of the recipient of the Work to exercise the rights granted to that recipient under the terms of the License. You may not sublicense the Work. You must keep intact all notices that refer to this License and to the disclaimer of warranties with every copy of the Work You Distribute or Publicly Perform. When You Distribute or Publicly Perform the Work, You may not impose any effective technological measures on the Work that restrict the ability of a recipient of the Work from You to exercise the rights granted to that recipient under the terms of the License. This Section 4(a) applies to the Work as incorporated in a Collection, but this does not require the Collection apart from the Work itself to be made subject to the terms of this License. If You create a Collection, upon notice from any Licensor You must, to the extent practicable, remove from the Collection any credit as required by Section 4(d), as requested. If You create an Adaptation, upon notice from any Licensor You must, to the extent practicable, remove from the Adaptation any credit as required by Section 4(d), as requested. 
+You may Distribute or Publicly Perform an Adaptation only under: (i) the terms of this License; (ii) a later version of this License with the same License Elements as this License; (iii) a Creative Commons jurisdiction license (either this or a later license version) that contains the same License Elements as this License (e.g., Attribution-NonCommercial-ShareAlike 3.0 US) ("Applicable License"). You must include a copy of, or the URI, for Applicable License with every copy of each Adaptation You Distribute or Publicly Perform. You may not offer or impose any terms on the Adaptation that restrict the terms of the Applicable License or the ability of the recipient of the Adaptation to exercise the rights granted to that recipient under the terms of the Applicable License. You must keep intact all notices that refer to the Applicable License and to the disclaimer of warranties with every copy of the Work as included in the Adaptation You Distribute or Publicly Perform. When You Distribute or Publicly Perform the Adaptation, You may not impose any effective technological measures on the Adaptation that restrict the ability of a recipient of the Adaptation from You to exercise the rights granted to that recipient under the terms of the Applicable License. This Section 4(b) applies to the Adaptation as incorporated in a Collection, but this does not require the Collection apart from the Adaptation itself to be made subject to the terms of the Applicable License. +You may not exercise any of the rights granted to You in Section 3 above in any manner that is primarily intended for or directed toward commercial advantage or private monetary compensation. The exchange of the Work for other copyrighted works by means of digital file-sharing or otherwise shall not be considered to be intended for or directed toward commercial advantage or private monetary compensation, provided there is no payment of any monetary compensation in con-nection with the exchange of copyrighted works. +If You Distribute, or Publicly Perform the Work or any Adaptations or Collections, You must, unless a request has been made pursuant to Section 4(a), keep intact all copyright notices for the Work and provide, reasonable to the medium or means You are utilizing: (i) the name of the Original Author (or pseudonym, if applicable) if supplied, and/or if the Original Author and/or Licensor designate another party or parties (e.g., a sponsor institute, publishing entity, journal) for attribution ("Attribution Parties") in Licensor's copyright notice, terms of service or by other reasonable means, the name of such party or parties; (ii) the title of the Work if supplied; (iii) to the extent reasonably practicable, the URI, if any, that Licensor specifies to be associated with the Work, unless such URI does not refer to the copyright notice or licensing information for the Work; and, (iv) consistent with Section 3(b), in the case of an Adaptation, a credit identifying the use of the Work in the Adaptation (e.g., "French translation of the Work by Original Author," or "Screenplay based on original Work by Original Author"). The credit required by this Section 4(d) may be implemented in any reasonable manner; provided, however, that in the case of a Adaptation or Collection, at a minimum such credit will appear, if a credit for all contributing authors of the Adaptation or Collection appears, then as part of these credits and in a manner at least as prominent as the credits for the other contributing authors. 
For the avoidance of doubt, You may only use the credit required by this Section for the purpose of attribution in the manner set out above and, by exercising Your rights under this License, You may not implicitly or explicitly assert or imply any connection with, sponsorship or endorsement by the Original Author, Licensor and/or Attribution Parties, as appropriate, of You or Your use of the Work, without the separate, express prior written permission of the Original Author, Licensor and/or Attribution Parties. +For the avoidance of doubt: + +Non-waivable Compulsory License Schemes. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme cannot be waived, the Licensor reserves the exclusive right to collect such royalties for any exercise by You of the rights granted under this License; +Waivable Compulsory License Schemes. In those jurisdictions in which the right to collect royalties through any statutory or compulsory licensing scheme can be waived, the Licensor reserves the exclusive right to collect such royalties for any exercise by You of the rights granted under this License if Your exercise of such rights is for a purpose or use which is otherwise than noncommercial as permitted under Section 4(c) and otherwise waives the right to collect royalties through any statutory or compulsory licensing scheme; and, +Voluntary License Schemes. The Licensor reserves the right to collect royalties, whether individually or, in the event that the Licensor is a member of a collecting society that administers voluntary licensing schemes, via that society, from any exercise by You of the rights granted under this License that is for a purpose or use which is otherwise than noncommercial as permitted under Section 4(c). +Except as otherwise agreed in writing by the Licensor or as may be otherwise permitted by applicable law, if You Reproduce, Distribute or Publicly Perform the Work either by itself or as part of any Adaptations or Collections, You must not distort, mutilate, modify or take other derogatory action in relation to the Work which would be prejudicial to the Original Author's honor or reputation. Licensor agrees that in those jurisdictions (e.g. Japan), in which any exercise of the right granted in Section 3(b) of this License (the right to make Adaptations) would be deemed to be a distortion, mutilation, modification or other derogatory action prejudicial to the Original Author's honor and reputation, the Licensor will waive or not assert, as appropriate, this Section, to the fullest extent permitted by the applicable national law, to enable You to reasonably exercise Your right under Section 3(b) of this License (right to make Adaptations) but not otherwise. +5. Representations, Warranties and Disclaimer + +UNLESS OTHERWISE MUTUALLY AGREED TO BY THE PARTIES IN WRITING AND TO THE FULLEST EXTENT PERMITTED BY APPLICABLE LAW, LICENSOR OFFERS THE WORK AS-IS AND MAKES NO REPRESENTATIONS OR WARRANTIES OF ANY KIND CONCERNING THE WORK, EXPRESS, IMPLIED, STATUTORY OR OTHERWISE, INCLUDING, WITHOUT LIMITATION, WARRANTIES OF TITLE, MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NONINFRINGEMENT, OR THE ABSENCE OF LATENT OR OTHER DEFECTS, ACCURACY, OR THE PRESENCE OF ABSENCE OF ERRORS, WHETHER OR NOT DISCOVERABLE. SOME JURISDICTIONS DO NOT ALLOW THE EXCLUSION OF IMPLIED WARRANTIES, SO THIS EXCLUSION MAY NOT APPLY TO YOU. + +6. Limitation on Liability. 
EXCEPT TO THE EXTENT REQUIRED BY APPLICABLE LAW, IN NO EVENT WILL LICENSOR BE LIABLE TO YOU ON ANY LEGAL THEORY FOR ANY SPECIAL, INCIDENTAL, CONSEQUENTIAL, PUNITIVE OR EXEMPLARY DAMAGES ARISING OUT OF THIS LICENSE OR THE USE OF THE WORK, EVEN IF LICENSOR HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH DAMAGES. + +7. Termination + +This License and the rights granted hereunder will terminate automatically upon any breach by You of the terms of this License. Individuals or entities who have received Adaptations or Collections from You under this License, however, will not have their licenses terminated provided such individuals or entities remain in full compliance with those licenses. Sections 1, 2, 5, 6, 7, and 8 will survive any termination of this License. +Subject to the above terms and conditions, the license granted here is perpetual (for the duration of the applicable copyright in the Work). Notwithstanding the above, Licensor reserves the right to release the Work under different license terms or to stop distributing the Work at any time; provided, however that any such election will not serve to withdraw this License (or any other license that has been, or is required to be, granted under the terms of this License), and this License will continue in full force and effect unless terminated as stated above. +8. Miscellaneous + +Each time You Distribute or Publicly Perform the Work or a Collection, the Licensor offers to the recipient a license to the Work on the same terms and conditions as the license granted to You under this License. +Each time You Distribute or Publicly Perform an Adaptation, Licensor offers to the recipient a license to the original Work on the same terms and conditions as the license granted to You under this License. +If any provision of this License is invalid or unenforceable under applicable law, it shall not affect the validity or enforceability of the remainder of the terms of this License, and without further action by the parties to this agreement, such provision shall be reformed to the minimum extent necessary to make such provision valid and enforceable. +No term or provision of this License shall be deemed waived and no breach consented to unless such waiver or consent shall be in writing and signed by the party to be charged with such waiver or consent. +This License constitutes the entire agreement between the parties with respect to the Work licensed here. There are no understandings, agreements or representations with respect to the Work not specified here. Licensor shall not be bound by any additional provisions that may appear in any communication from You. This License may not be modified without the mutual written agreement of the Licensor and You. +The rights granted under, and the subject matter referenced, in this License were drafted utilizing the terminology of the Berne Convention for the Protection of Literary and Artistic Works (as amended on September 28, 1979), the Rome Convention of 1961, the WIPO Copyright Treaty of 1996, the WIPO Performances and Phonograms Treaty of 1996 and the Universal Copyright Convention (as revised on July 24, 1971). These rights and subject matter take effect in the relevant jurisdiction in which the License terms are sought to be enforced according to the corresponding provisions of the implementation of those treaty provisions in the applicable national law. 
If the standard suite of rights granted under applicable copyright law includes additional rights not granted under this License, such additional rights are deemed to be included in the License; this License is not intended to restrict the license of any rights under applicable law. diff --git a/models/Scanrefer/benchmark/eval.py b/models/Scanrefer/benchmark/eval.py new file mode 100644 index 0000000..d910721 --- /dev/null +++ b/models/Scanrefer/benchmark/eval.py @@ -0,0 +1,175 @@ +import argparse +import json +import os +import sys + +import numpy as np +from tqdm import tqdm + +sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder +from lib.config import CONF +from utils.box_util import box3d_iou + +SCANREFER_GT = json.load( + open(os.path.join(CONF.PATH.DATA, 'ScanRefer_filtered_test_gt_bbox.json'))) + + +def organize_gt(): + organized = {} + + for data in SCANREFER_GT: + scene_id = data['scene_id'] + object_id = data['object_id'] + ann_id = data['ann_id'] + + if scene_id not in organized: + organized[scene_id] = {} + + if object_id not in organized[scene_id]: + organized[scene_id][object_id] = {} + + if ann_id not in organized[scene_id][object_id]: + organized[scene_id][object_id][ann_id] = {} + + organized[scene_id][object_id][ann_id] = data + + return organized + + +def evaluate(args): + pred_path = os.path.join(CONF.PATH.OUTPUT, args.folder, 'pred.json') + if not os.path.isfile(pred_path): + print( + 'please run `benchmark/predict.py` first to generate bounding boxes' + ) + exit() + + organized_gt = organize_gt() + + with open(pred_path) as f: + predictions = json.load(f) + ious = [] + masks = [] + others = [] + print('evaluating...') + for data in tqdm(predictions): + scene_id = data['scene_id'] + object_id = data['object_id'] + ann_id = data['ann_id'] + pred_bbox = np.array(data['bbox']) + mask = data['unique_multiple'] + other = data['others'] + + try: + gt_bbox = np.array( + organized_gt[scene_id][object_id][ann_id]['bbox']) + # iou, _ = box3d_iou(pred_bbox, gt_bbox) + iou = box3d_iou(pred_bbox, gt_bbox) + + except KeyError: + iou = 0 + + ious.append(iou) + masks.append(mask) + others.append(other) + + # ious = np.array(ious) + # iou_rate_025 = ious[ious >= 0.25].shape[0] / ious.shape[0] + # iou_rate_05 = ious[ious >= 0.5].shape[0] / ious.shape[0] + + # print("\nAcc@0.25IoU: {}".format(iou_rate_025)) + # print("Acc@0.5IoU: {}".format(iou_rate_05)) + + ious = np.array(ious) + masks = np.array(masks) + others = np.array(others) + + multiple_dict = {'unique': 0, 'multiple': 1} + others_dict = {'not_in_others': 0, 'in_others': 1} + + # evaluation stats + stats = {k: np.sum(masks == v) for k, v in multiple_dict.items()} + stats['overall'] = masks.shape[0] + stats = {} + for k, v in multiple_dict.items(): + stats[k] = {} + for k_o, v_o in others_dict.items(): + stats[k][k_o] = np.sum( + np.logical_and(masks == v, others == v_o)) + + stats[k]['overall'] = np.sum(masks == v) + + stats['overall'] = {} + for k_o, v_o in others_dict.items(): + stats['overall'][k_o] = np.sum(others == v_o) + + stats['overall']['overall'] = masks.shape[0] + + # aggregate scores + scores = {} + for k, v in multiple_dict.items(): + for k_o in others_dict.keys(): + acc_025iou = ious[np.logical_and(np.logical_and(masks == multiple_dict[k], others == others_dict[k_o]), ious >= 0.25)].shape[0] \ + / ious[np.logical_and(masks == multiple_dict[k], others == others_dict[k_o])].shape[0] \ + if np.sum(np.logical_and(masks == multiple_dict[k], others == others_dict[k_o])) > 0 else 0 + acc_05iou = 
ious[np.logical_and(np.logical_and(masks == multiple_dict[k], others == others_dict[k_o]), ious >= 0.5)].shape[0] \ + / ious[np.logical_and(masks == multiple_dict[k], others == others_dict[k_o])].shape[0] \ + if np.sum(np.logical_and(masks == multiple_dict[k], others == others_dict[k_o])) > 0 else 0 + + if k not in scores: + scores[k] = {k_o: {} for k_o in others_dict.keys()} + + scores[k][k_o]['acc@0.25iou'] = acc_025iou + scores[k][k_o]['acc@0.5iou'] = acc_05iou + + acc_025iou = ious[np.logical_and(masks == multiple_dict[k], ious >= 0.25)].shape[0] \ + / ious[masks == multiple_dict[k]].shape[0] if np.sum(masks == multiple_dict[k]) > 0 else 0 + acc_05iou = ious[np.logical_and(masks == multiple_dict[k], ious >= 0.5)].shape[0] \ + / ious[masks == multiple_dict[k]].shape[0] if np.sum(masks == multiple_dict[k]) > 0 else 0 + + scores[k]['overall'] = {} + scores[k]['overall']['acc@0.25iou'] = acc_025iou + scores[k]['overall']['acc@0.5iou'] = acc_05iou + + scores['overall'] = {} + for k_o in others_dict.keys(): + acc_025iou = ious[np.logical_and(others == others_dict[k_o], ious >= 0.25)].shape[0] \ + / ious[others == others_dict[k_o]].shape[0] if np.sum(others == others_dict[k_o]) > 0 else 0 + acc_05iou = ious[np.logical_and(others == others_dict[k_o], ious >= 0.5)].shape[0] \ + / ious[others == others_dict[k_o]].shape[0] if np.sum(others == others_dict[k_o]) > 0 else 0 + + # aggregate + scores['overall'][k_o] = {} + scores['overall'][k_o]['acc@0.25iou'] = acc_025iou + scores['overall'][k_o]['acc@0.5iou'] = acc_05iou + + acc_025iou = ious[ious >= 0.25].shape[0] / ious.shape[0] + acc_05iou = ious[ious >= 0.5].shape[0] / ious.shape[0] + + # aggregate + scores['overall']['overall'] = {} + scores['overall']['overall']['acc@0.25iou'] = acc_025iou + scores['overall']['overall']['acc@0.5iou'] = acc_05iou + + # report + print('\nstats:') + for k_s in stats.keys(): + for k_o in stats[k_s].keys(): + print('{} | {}: {}'.format(k_s, k_o, stats[k_s][k_o])) + + for k_s in scores.keys(): + print('\n{}:'.format(k_s)) + for k_m in scores[k_s].keys(): + for metric in scores[k_s][k_m].keys(): + print('{} | {} | {}: {}'.format(k_s, k_m, metric, + scores[k_s][k_m][metric])) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--folder', + type=str, + help='Folder containing the model') + args = parser.parse_args() + + evaluate(args) diff --git a/models/Scanrefer/benchmark/predict.py b/models/Scanrefer/benchmark/predict.py new file mode 100644 index 0000000..bb2f7dc --- /dev/null +++ b/models/Scanrefer/benchmark/predict.py @@ -0,0 +1,250 @@ +import argparse +import importlib +import json +import os +import pickle +import sys +from copy import deepcopy +from datetime import datetime + +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.data import DataLoader +from tqdm import tqdm + +sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder +from data.scannet.model_util_scannet import ScannetDatasetConfig +from lib.ap_helper import APCalculator, parse_groundtruths, parse_predictions +from lib.config import CONF +from lib.dataset import ScannetReferenceDataset +from lib.eval_helper import get_eval +from lib.loss_helper import get_loss +from lib.solver import Solver +from models.refnet import RefNet +from utils.box_util import get_3d_box + +SCANREFER_TEST = json.load( + open(os.path.join(CONF.PATH.DATA, 'ScanRefer_filtered_test.json'))) + + +def get_dataloader(args, scanrefer, all_scene_list, split, config): + dataset 
= ScannetReferenceDataset(scanrefer=scanrefer, + scanrefer_all_scene=all_scene_list, + split=split, + num_points=args.num_points, + use_color=args.use_color, + use_height=(not args.no_height), + use_normal=args.use_normal, + use_multiview=args.use_multiview) + print('predict for {} samples'.format(len(dataset))) + + dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False) + + return dataset, dataloader + + +def get_model(args, config): + # load model + input_channels = int(args.use_multiview) * 128 + int( + args.use_normal) * 3 + int( + args.use_color) * 3 + int(not args.no_height) + model = RefNet(num_class=config.num_class, + num_heading_bin=config.num_heading_bin, + num_size_cluster=config.num_size_cluster, + mean_size_arr=config.mean_size_arr, + num_proposal=args.num_proposals, + input_feature_dim=input_channels, + use_lang_classifier=(not args.no_lang_cls), + use_bidir=args.use_bidir).cuda() + + model_name = 'model.pth' + path = os.path.join(CONF.PATH.OUTPUT, args.folder, model_name) + model.load_state_dict(torch.load(path), strict=False) + model.eval() + + return model + + +def get_scannet_scene_list(split): + scene_list = sorted([ + line.rstrip() for line in open( + os.path.join(CONF.PATH.SCANNET_META, 'scannetv2_{}.txt'.format( + split))) + ]) + + return scene_list + + +def get_scanrefer(args): + scanrefer = SCANREFER_TEST + scene_list = sorted(list(set([data['scene_id'] for data in scanrefer]))) + scanrefer = [data for data in scanrefer if data['scene_id'] in scene_list] + + return scanrefer, scene_list + + +def predict(args): + print('predict bounding boxes...') + # constant + DC = ScannetDatasetConfig() + + # init training dataset + print('preparing data...') + scanrefer, scene_list = get_scanrefer(args) + + # dataloader + _, dataloader = get_dataloader(args, scanrefer, scene_list, 'test', DC) + + # model + model = get_model(args, DC) + + # config + POST_DICT = { + 'remove_empty_box': True, + 'use_3d_nms': True, + 'nms_iou': 0.25, + 'use_old_type_nms': False, + 'cls_nms': True, + 'per_class_proposal': True, + 'conf_thresh': 0.05, + 'dataset_config': DC + } if not args.no_nms else None + + # predict + print('predicting...') + pred_bboxes = [] + for data_dict in tqdm(dataloader): + for key in data_dict: + data_dict[key] = data_dict[key].cuda() + + # feed + data_dict = model(data_dict) + _, data_dict = get_loss(data_dict=data_dict, + config=DC, + detection=False, + reference=True) + + objectness_preds_batch = torch.argmax(data_dict['objectness_scores'], + 2).long() + + if POST_DICT: + _ = parse_predictions(data_dict, POST_DICT) + nms_masks = torch.LongTensor(data_dict['pred_mask']).cuda() + + # construct valid mask + pred_masks = (nms_masks * objectness_preds_batch == 1).float() + else: + # construct valid mask + pred_masks = (objectness_preds_batch == 1).float() + + pred_ref = torch.argmax(data_dict['cluster_ref'] * pred_masks, + 1) # (B,) + pred_center = data_dict['center'] # (B,K,3) + + pred_rot_mat = data_dict['rot_mat'] + # pred_heading_class = torch.argmax(data_dict['heading_scores'], -1) # B,num_proposal + # pred_heading_residual = torch.gather(data_dict['heading_residuals'], 2, pred_heading_class.unsqueeze(-1)) # B,num_proposal,1 + # pred_heading_class = pred_heading_class # B,num_proposal + # pred_heading_residual = pred_heading_residual.squeeze(2) # B,num_proposal + + pred_size_class = torch.argmax(data_dict['size_scores'], + -1) # B,num_proposal + pred_size_residual = torch.gather( + data_dict['size_residuals'], 2, + 
pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(
+                1, 1, 1, 3))  # B,num_proposal,1,3
+        pred_size_class = pred_size_class
+        pred_size_residual = pred_size_residual.squeeze(2)  # B,num_proposal,3
+
+        for i in range(pred_ref.shape[0]):
+            # compute the iou
+            pred_ref_idx = pred_ref[i]
+            pred_obb = DC.param2obb(  # TODO yesname
+                pred_center[i, pred_ref_idx, 0:3].detach().cpu().numpy(),
+                pred_rot_mat[i, pred_ref_idx].detach().cpu().numpy(),
+                # pred_heading_class[i, pred_ref_idx].detach().cpu().numpy(),
+                # pred_heading_residual[i, pred_ref_idx].detach().cpu().numpy(),
+                pred_size_class[i, pred_ref_idx].detach().cpu().numpy(),
+                pred_size_residual[i, pred_ref_idx].detach().cpu().numpy())
+            pred_bbox = get_3d_box(pred_obb[3:6], pred_obb[6], pred_obb[0:3])
+
+            # construct the multiple mask
+            multiple = data_dict['unique_multiple'][i].item()
+
+            # construct the others mask
+            others = 1 if data_dict['object_cat'][i] == 17 else 0
+
+            # store data
+            scanrefer_idx = data_dict['scan_idx'][i].item()
+            pred_data = {
+                'scene_id': scanrefer[scanrefer_idx]['scene_id'],
+                'object_id': scanrefer[scanrefer_idx]['object_id'],
+                'ann_id': scanrefer[scanrefer_idx]['ann_id'],
+                'bbox': pred_bbox.tolist(),
+                'unique_multiple': multiple,
+                'others': others
+            }
+            pred_bboxes.append(pred_data)
+
+    # dump
+    print('dumping...')
+    pred_path = os.path.join(CONF.PATH.OUTPUT, args.folder, 'pred.json')
+    with open(pred_path, 'w') as f:
+        json.dump(pred_bboxes, f, indent=4)
+
+    print('done!')
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--folder',
+                        type=str,
+                        help='Folder containing the model')
+    parser.add_argument('--gpu', type=str, help='gpu', default='0')
+    parser.add_argument('--batch_size', type=int, help='batch size', default=8)
+    parser.add_argument('--num_points',
+                        type=int,
+                        default=40000,
+                        help='Point Number [default: 40000]')
+    parser.add_argument('--num_proposals',
+                        type=int,
+                        default=256,
+                        help='Proposal number [default: 256]')
+    parser.add_argument('--seed', type=int, default=42, help='random seed')
+    parser.add_argument('--no_height',
+                        action='store_true',
+                        help='Do NOT use height signal in input.')
+    parser.add_argument('--no_lang_cls',
+                        action='store_true',
+                        help='Do NOT use language classifier.')
+    parser.add_argument(
+        '--no_nms',
+        action='store_true',
+        help='do NOT use non-maximum suppression for post-processing.')
+    parser.add_argument('--use_color',
+                        action='store_true',
+                        help='Use RGB color in input.')
+    parser.add_argument('--use_normal',
+                        action='store_true',
+                        help='Use normal in input.')
+    parser.add_argument('--use_multiview',
+                        action='store_true',
+                        help='Use multiview images.')
+    parser.add_argument('--use_bidir',
+                        action='store_true',
+                        help='Use bi-directional GRU.')
+    args = parser.parse_args()
+
+    # setting
+    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+
+    # reproducibility
+    torch.manual_seed(args.seed)
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(args.seed)
+
+    predict(args)
diff --git a/models/Scanrefer/data/scannet/README.md b/models/Scanrefer/data/scannet/README.md
new file mode 100644
index 0000000..c9ebad9
--- /dev/null
+++ b/models/Scanrefer/data/scannet/README.md
@@ -0,0 +1,12 @@
+# ScanNet Instructions
+
+To get access to the ScanNet dataset, please refer to the [ScanNet project page](https://github.com/ScanNet/ScanNet) and follow the instructions there. You will get a `download-scannet.py` script after your request for the ScanNet dataset is approved. Note that only a subset of ScanNet is needed. Once you get `download-scannet.py`, please use the commands below to download the portion of ScanNet that is necessary for ScanRefer:
+
+```shell
+python2 download-scannet.py -o data/scannet --type _vh_clean_2.ply
+python2 download-scannet.py -o data/scannet --type .aggregation.json
+python2 download-scannet.py -o data/scannet --type _vh_clean_2.0.010000.segs.json
+python2 download-scannet.py -o data/scannet --type .txt
+```
+
+Roughly 10.6 GB of free disk space is required.
diff --git a/models/Scanrefer/data/scannet/batch_load_scannet_data.py b/models/Scanrefer/data/scannet/batch_load_scannet_data.py
new file mode 100644
index 0000000..bd63be9
--- /dev/null
+++ b/models/Scanrefer/data/scannet/batch_load_scannet_data.py
@@ -0,0 +1,92 @@
+"""Modified from: https://github.com/facebookresearch/votenet/blob/master/scannet/batch_load_scannet_data.py.
+
+Batch mode for loading ScanNet scenes with vertices and ground-truth labels for semantic and instance segmentation.
+
+Usage example: python ./batch_load_scannet_data.py
+"""
+
+import datetime
+import os
+from multiprocessing import Pool
+
+import numpy as np
+from load_scannet_data import export
+
+SCANNET_DIR = 'scans'
+SCAN_NAMES = sorted(
+    [line.rstrip() for line in open('meta_data/scannetv2.txt')])
+LABEL_MAP_FILE = 'meta_data/scannetv2-labels.combined.tsv'
+DONOTCARE_CLASS_IDS = np.array([])
+OBJ_CLASS_IDS = np.array([
+    3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 23,
+    24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40
+])  # exclude wall (1), floor (2), ceiling (22)
+MAX_NUM_POINT = 50000
+OUTPUT_FOLDER = './scannet_data'
+
+
+def export_one_scan(scan_name):
+    output_filename_prefix = os.path.join(OUTPUT_FOLDER, scan_name)
+    mesh_file = os.path.join(SCANNET_DIR, scan_name,
+                             scan_name + '_vh_clean_2.ply')
+    agg_file = os.path.join(SCANNET_DIR, scan_name,
+                            scan_name + '.aggregation.json')
+    seg_file = os.path.join(SCANNET_DIR, scan_name,
+                            scan_name + '_vh_clean_2.0.010000.segs.json')
+    meta_file = os.path.join(
+        SCANNET_DIR, scan_name, scan_name +
+        '.txt')  # includes axisAlignment info for the train set scans.
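+    # export() (defined in load_scannet_data.py) parses the mesh, aggregation
+    # and segmentation files and returns per-vertex XYZ+RGB+normal data, an
+    # axis-aligned copy of the vertices, nyu40 semantic labels, 1-indexed
+    # instance labels, and per-instance bounding boxes stored as
+    # (cx, cy, cz, dx, dy, dz, label_id, obj_id - 1).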
+ mesh_vertices, aligned_vertices, semantic_labels, instance_labels, instance_bboxes, aligned_instance_bboxes = export( + mesh_file, agg_file, seg_file, meta_file, LABEL_MAP_FILE, None) + + mask = np.logical_not(np.in1d(semantic_labels, DONOTCARE_CLASS_IDS)) + mesh_vertices = mesh_vertices[mask, :] + aligned_vertices = aligned_vertices[mask, :] + semantic_labels = semantic_labels[mask] + instance_labels = instance_labels[mask] + + if instance_bboxes.shape[0] > 1: + num_instances = len(np.unique(instance_labels)) + print('Num of instances: ', num_instances) + + # bbox_mask = np.in1d(instance_bboxes[:,-1], OBJ_CLASS_IDS) + bbox_mask = np.in1d(instance_bboxes[:, -2], + OBJ_CLASS_IDS) # match the mesh2cap + instance_bboxes = instance_bboxes[bbox_mask, :] + aligned_instance_bboxes = aligned_instance_bboxes[bbox_mask, :] + print('Num of care instances: ', instance_bboxes.shape[0]) + else: + print('No semantic/instance annotation for test scenes') + + N = mesh_vertices.shape[0] + if N > MAX_NUM_POINT: + choices = np.random.choice(N, MAX_NUM_POINT, replace=False) + mesh_vertices = mesh_vertices[choices, :] + aligned_vertices = aligned_vertices[choices, :] + semantic_labels = semantic_labels[choices] + instance_labels = instance_labels[choices] + + print('Shape of points: {}'.format(mesh_vertices.shape)) + + np.save(output_filename_prefix + '_vert.npy', mesh_vertices) + np.save(output_filename_prefix + '_aligned_vert.npy', aligned_vertices) + np.save(output_filename_prefix + '_sem_label.npy', semantic_labels) + np.save(output_filename_prefix + '_ins_label.npy', instance_labels) + np.save(output_filename_prefix + '_bbox.npy', instance_bboxes) + np.save(output_filename_prefix + '_aligned_bbox.npy', + aligned_instance_bboxes) + + +def batch_export(): + + if not os.path.exists(OUTPUT_FOLDER): + print('Creating new data folder: {}'.format(OUTPUT_FOLDER)) + os.mkdir(OUTPUT_FOLDER) + + with Pool() as pool: + pool.map(export_one_scan, SCAN_NAMES) + + +if __name__ == '__main__': + batch_export() diff --git a/models/Scanrefer/data/scannet/load_scannet_data.py b/models/Scanrefer/data/scannet/load_scannet_data.py new file mode 100644 index 0000000..f4a5722 --- /dev/null +++ b/models/Scanrefer/data/scannet/load_scannet_data.py @@ -0,0 +1,207 @@ +"""Modified from: https://github.com/facebookresearch/votenet/blob/master/scann +et/load_scannet_data.py. 
+ +Load Scannet scenes with vertices and ground truth labels for semantic and +instance segmentations +""" + +import argparse +import inspect +import json +# python imports +import math +import os +import pdb +import sys + +import numpy as np +import scannet_utils + + +def read_aggregation(filename): + object_id_to_segs = {} + label_to_segs = {} + with open(filename) as f: + data = json.load(f) + num_objects = len(data['segGroups']) + for i in range(num_objects): + object_id = data['segGroups'][i][ + 'objectId'] + 1 # instance ids should be 1-indexed + label = data['segGroups'][i]['label'] + segs = data['segGroups'][i]['segments'] + object_id_to_segs[object_id] = segs + if label in label_to_segs: + label_to_segs[label].extend(segs) + else: + label_to_segs[label] = segs + return object_id_to_segs, label_to_segs + + +def read_segmentation(filename): + seg_to_verts = {} + with open(filename) as f: + data = json.load(f) + num_verts = len(data['segIndices']) + for i in range(num_verts): + seg_id = data['segIndices'][i] + if seg_id in seg_to_verts: + seg_to_verts[seg_id].append(i) + else: + seg_to_verts[seg_id] = [i] + return seg_to_verts, num_verts + + +def export(mesh_file, + agg_file, + seg_file, + meta_file, + label_map_file, + output_file=None): + """points are XYZ RGB (RGB in 0-255), semantic label as nyu40 ids, instance + label as 1-#instance, box as (cx,cy,cz,dx,dy,dz,semantic_label)""" + label_map = scannet_utils.read_label_mapping(label_map_file, + label_from='raw_category', + label_to='nyu40id') + # mesh_vertices = scannet_utils.read_mesh_vertices_rgb(mesh_file) + mesh_vertices = scannet_utils.read_mesh_vertices_rgb_normal(mesh_file) + + # Load scene axis alignment matrix + lines = open(meta_file).readlines() + axis_align_matrix = None + for line in lines: + if 'axisAlignment' in line: + axis_align_matrix = [ + float(x) + for x in line.rstrip().strip('axisAlignment = ').split(' ') + ] + + if axis_align_matrix != None: + axis_align_matrix = np.array(axis_align_matrix).reshape((4, 4)) + pts = np.ones((mesh_vertices.shape[0], 4)) + pts[:, 0:3] = mesh_vertices[:, 0:3] + pts = np.dot(pts, axis_align_matrix.transpose()) # Nx4 + aligned_vertices = np.copy(mesh_vertices) + aligned_vertices[:, 0:3] = pts[:, 0:3] + else: + print('No axis alignment matrix found') + aligned_vertices = mesh_vertices + + # Load semantic and instance labels + if os.path.isfile(agg_file): + object_id_to_segs, label_to_segs = read_aggregation(agg_file) + seg_to_verts, num_verts = read_segmentation(seg_file) + + label_ids = np.zeros(shape=(num_verts), + dtype=np.uint32) # 0: unannotated + object_id_to_label_id = {} + for label, segs in label_to_segs.items(): + label_id = label_map[label] + for seg in segs: + verts = seg_to_verts[seg] + label_ids[verts] = label_id + instance_ids = np.zeros(shape=(num_verts), + dtype=np.uint32) # 0: unannotated + num_instances = len(np.unique(list(object_id_to_segs.keys()))) + for object_id, segs in object_id_to_segs.items(): + for seg in segs: + verts = seg_to_verts[seg] + instance_ids[verts] = object_id + if object_id not in object_id_to_label_id: + object_id_to_label_id[object_id] = label_ids[verts][0] + + instance_bboxes = np.zeros( + (num_instances, 8)) # also include object id + aligned_instance_bboxes = np.zeros( + (num_instances, 8)) # also include object id + for obj_id in object_id_to_segs: + label_id = object_id_to_label_id[obj_id] + + # bboxes in the original meshes + obj_pc = mesh_vertices[instance_ids == obj_id, 0:3] + if len(obj_pc) == 0: continue + # Compute axis aligned 
box
+            # An axis aligned bounding box is parameterized by
+            # (cx,cy,cz) and (dx,dy,dz) and label id
+            # where (cx,cy,cz) is the center point of the box,
+            # dx is the x-axis length of the box.
+            xmin = np.min(obj_pc[:, 0])
+            ymin = np.min(obj_pc[:, 1])
+            zmin = np.min(obj_pc[:, 2])
+            xmax = np.max(obj_pc[:, 0])
+            ymax = np.max(obj_pc[:, 1])
+            zmax = np.max(obj_pc[:, 2])
+            bbox = np.array([(xmin + xmax) / 2, (ymin + ymax) / 2,
+                             (zmin + zmax) / 2, xmax - xmin, ymax - ymin,
+                             zmax - zmin, label_id,
+                             obj_id - 1])  # also include object id
+            # NOTE: this assumes obj_id is in 1, 2, 3, ..., NUM_INSTANCES
+            instance_bboxes[obj_id - 1, :] = bbox
+
+            # bboxes in the aligned meshes
+            obj_pc = aligned_vertices[instance_ids == obj_id, 0:3]
+            if len(obj_pc) == 0: continue
+            # Compute axis aligned box
+            # An axis aligned bounding box is parameterized by
+            # (cx,cy,cz) and (dx,dy,dz) and label id
+            # where (cx,cy,cz) is the center point of the box,
+            # dx is the x-axis length of the box.
+            xmin = np.min(obj_pc[:, 0])
+            ymin = np.min(obj_pc[:, 1])
+            zmin = np.min(obj_pc[:, 2])
+            xmax = np.max(obj_pc[:, 0])
+            ymax = np.max(obj_pc[:, 1])
+            zmax = np.max(obj_pc[:, 2])
+            bbox = np.array([(xmin + xmax) / 2, (ymin + ymax) / 2,
+                             (zmin + zmax) / 2, xmax - xmin, ymax - ymin,
+                             zmax - zmin, label_id,
+                             obj_id - 1])  # also include object id
+            # NOTE: this assumes obj_id is in 1, 2, 3, ..., NUM_INSTANCES
+            aligned_instance_bboxes[obj_id - 1, :] = bbox
+    else:
+        # use zero as placeholders for the test scene
+        print('use placeholders')
+        num_verts = mesh_vertices.shape[0]
+        label_ids = np.zeros(shape=(num_verts),
+                             dtype=np.uint32)  # 0: unannotated
+        instance_ids = np.zeros(shape=(num_verts),
+                                dtype=np.uint32)  # 0: unannotated
+        instance_bboxes = np.zeros((1, 8))  # also include object id
+        aligned_instance_bboxes = np.zeros((1, 8))  # also include object id
+
+    if output_file is not None:
+        np.save(output_file + '_vert.npy', mesh_vertices)
+        np.save(output_file + '_aligned_vert.npy', aligned_vertices)
+        np.save(output_file + '_sem_label.npy', label_ids)
+        np.save(output_file + '_ins_label.npy', instance_ids)
+        np.save(output_file + '_bbox.npy', instance_bboxes)
+        np.save(output_file + '_aligned_bbox.npy', aligned_instance_bboxes)
+
+    return mesh_vertices, aligned_vertices, label_ids, instance_ids, instance_bboxes, aligned_instance_bboxes
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        '--scan_path',
+        required=True,
+        help='path to scannet scene (e.g., data/ScanNet/v2/scene0000_00)')
+    parser.add_argument('--output_file', required=True, help='output file')
+    parser.add_argument('--label_map_file',
+                        required=True,
+                        help='path to scannetv2-labels.combined.tsv')
+    opt = parser.parse_args()
+
+    scan_name = os.path.split(opt.scan_path)[-1]
+    mesh_file = os.path.join(opt.scan_path, scan_name + '_vh_clean_2.ply')
+    agg_file = os.path.join(opt.scan_path, scan_name + '.aggregation.json')
+    seg_file = os.path.join(opt.scan_path,
+                            scan_name + '_vh_clean_2.0.010000.segs.json')
+    meta_file = os.path.join(
+        opt.scan_path, scan_name +
+        '.txt')  # includes axisAlignment info for the train set scans.
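+    # With an output prefix given, export() also dumps the parsed arrays to
+    # <output_file>_vert.npy, _aligned_vert.npy, _sem_label.npy,
+    # _ins_label.npy, _bbox.npy and _aligned_bbox.npy (see above).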
+ export(mesh_file, agg_file, seg_file, meta_file, opt.label_map_file, + opt.output_file) + + +if __name__ == '__main__': + main() diff --git a/models/Scanrefer/data/scannet/meta_data/es_type_id.py b/models/Scanrefer/data/scannet/meta_data/es_type_id.py new file mode 100644 index 0000000..54f542c --- /dev/null +++ b/models/Scanrefer/data/scannet/meta_data/es_type_id.py @@ -0,0 +1,290 @@ +es_type_dict = { + 'adhesive tape': 1, + 'air conditioner': 2, + 'alarm': 3, + 'album': 4, + 'arch': 5, + 'backpack': 6, + 'bag': 7, + 'balcony': 8, + 'ball': 9, + 'banister': 10, + 'bar': 11, + 'barricade': 12, + 'baseboard': 13, + 'basin': 14, + 'basket': 15, + 'bathtub': 16, + 'beam': 17, + 'beanbag': 18, + 'bed': 19, + 'bench': 20, + 'bicycle': 21, + 'bidet': 22, + 'bin': 23, + 'blackboard': 24, + 'blanket': 25, + 'blinds': 26, + 'board': 27, + 'body loofah': 28, + 'book': 29, + 'boots': 30, + 'bottle': 31, + 'bowl': 32, + 'box': 33, + 'bread': 34, + 'broom': 35, + 'brush': 36, + 'bucket': 37, + 'cabinet': 38, + 'calendar': 39, + 'camera': 40, + 'can': 41, + 'candle': 42, + 'candlestick': 43, + 'cap': 44, + 'car': 45, + 'carpet': 46, + 'cart': 47, + 'case': 48, + 'ceiling': 49, + 'chair': 50, + 'chandelier': 51, + 'cleanser': 52, + 'clock': 53, + 'clothes': 54, + 'clothes dryer': 55, + 'coat hanger': 56, + 'coffee maker': 57, + 'coil': 58, + 'column': 59, + 'commode': 60, + 'computer': 61, + 'conducting wire': 62, + 'container': 63, + 'control': 64, + 'copier': 65, + 'cosmetics': 66, + 'couch': 67, + 'counter': 68, + 'countertop': 69, + 'crate': 70, + 'crib': 71, + 'cube': 72, + 'cup': 73, + 'curtain': 74, + 'cushion': 75, + 'decoration': 76, + 'desk': 77, + 'detergent': 78, + 'device': 79, + 'dish rack': 80, + 'dishwasher': 81, + 'dispenser': 82, + 'divider': 83, + 'door': 84, + 'door knob': 85, + 'doorframe': 86, + 'doorway': 87, + 'drawer': 88, + 'dress': 89, + 'dresser': 90, + 'drum': 91, + 'duct': 92, + 'dumbbell': 93, + 'dustpan': 94, + 'dvd': 95, + 'eraser': 96, + 'excercise equipment': 97, + 'fan': 98, + 'faucet': 99, + 'fence': 100, + 'file': 101, + 'fire extinguisher': 102, + 'fireplace': 103, + 'floor': 104, + 'flowerpot': 105, + 'flush': 106, + 'folder': 107, + 'food': 108, + 'footstool': 109, + 'frame': 110, + 'fruit': 111, + 'furniture': 112, + 'garage door': 113, + 'garbage': 114, + 'glass': 115, + 'globe': 116, + 'glove': 117, + 'grab bar': 118, + 'grass': 119, + 'guitar': 120, + 'hair dryer': 121, + 'hamper': 122, + 'handle': 123, + 'hanger': 124, + 'hat': 125, + 'headboard': 126, + 'headphones': 127, + 'heater': 128, + 'helmets': 129, + 'holder': 130, + 'hook': 131, + 'humidifier': 132, + 'ironware': 133, + 'jacket': 134, + 'jalousie': 135, + 'jar': 136, + 'kettle': 137, + 'keyboard': 138, + 'kitchen island': 139, + 'kitchenware': 140, + 'knife': 141, + 'label': 142, + 'ladder': 143, + 'lamp': 144, + 'laptop': 145, + 'ledge': 146, + 'letter': 147, + 'light': 148, + 'luggage': 149, + 'machine': 150, + 'magazine': 151, + 'mailbox': 152, + 'map': 153, + 'mask': 154, + 'mat': 155, + 'mattress': 156, + 'menu': 157, + 'microwave': 158, + 'mirror': 159, + 'molding': 160, + 'monitor': 161, + 'mop': 162, + 'mouse': 163, + 'napkins': 164, + 'notebook': 165, + 'object': 166, + 'ottoman': 167, + 'oven': 168, + 'pack': 169, + 'package': 170, + 'pad': 171, + 'pan': 172, + 'panel': 173, + 'paper': 174, + 'paper cutter': 175, + 'partition': 176, + 'pedestal': 177, + 'pen': 178, + 'person': 179, + 'piano': 180, + 'picture': 181, + 'pillar': 182, + 'pillow': 183, + 'pipe': 184, + 'pitcher': 185, + 
'plant': 186, + 'plate': 187, + 'player': 188, + 'plug': 189, + 'plunger': 190, + 'pool': 191, + 'pool table': 192, + 'poster': 193, + 'pot': 194, + 'price tag': 195, + 'printer': 196, + 'projector': 197, + 'purse': 198, + 'rack': 199, + 'radiator': 200, + 'radio': 201, + 'rail': 202, + 'range hood': 203, + 'refrigerator': 204, + 'remote control': 205, + 'ridge': 206, + 'rod': 207, + 'roll': 208, + 'roof': 209, + 'rope': 210, + 'sack': 211, + 'salt': 212, + 'scale': 213, + 'scissors': 214, + 'screen': 215, + 'seasoning': 216, + 'shampoo': 217, + 'sheet': 218, + 'shelf': 219, + 'shirt': 220, + 'shoe': 221, + 'shovel': 222, + 'shower': 223, + 'sign': 224, + 'sink': 225, + 'soap': 226, + 'soap dish': 227, + 'soap dispenser': 228, + 'socket': 229, + 'speaker': 230, + 'sponge': 231, + 'spoon': 232, + 'stairs': 233, + 'stall': 234, + 'stand': 235, + 'stapler': 236, + 'statue': 237, + 'step': 238, + 'stick': 239, + 'stool': 240, + 'stopcock': 241, + 'stove': 242, + 'structure': 243, + 'sunglasses': 244, + 'support': 245, + 'switch': 246, + 'table': 247, + 'tablet': 248, + 'teapot': 249, + 'telephone': 250, + 'thermostat': 251, + 'tissue': 252, + 'tissue box': 253, + 'toaster': 254, + 'toilet': 255, + 'toilet paper': 256, + 'toiletry': 257, + 'tool': 258, + 'toothbrush': 259, + 'toothpaste': 260, + 'towel': 261, + 'toy': 262, + 'tray': 263, + 'treadmill': 264, + 'trophy': 265, + 'tube': 266, + 'tv': 267, + 'umbrella': 268, + 'urn': 269, + 'utensil': 270, + 'vacuum cleaner': 271, + 'vanity': 272, + 'vase': 273, + 'vent': 274, + 'ventilation': 275, + 'wall': 276, + 'wardrobe': 277, + 'washbasin': 278, + 'washing machine': 279, + 'water cooler': 280, + 'water heater': 281, + 'window': 282, + 'window frame': 283, + 'windowsill': 284, + 'wine': 285, + 'wire': 286, + 'wood': 287, + 'wrap': 288 +} diff --git a/models/Scanrefer/data/scannet/meta_data/mean_size_array.npy b/models/Scanrefer/data/scannet/meta_data/mean_size_array.npy new file mode 100755 index 0000000..fc02d85 Binary files /dev/null and b/models/Scanrefer/data/scannet/meta_data/mean_size_array.npy differ diff --git a/models/Scanrefer/data/scannet/meta_data/nyu40_labels.csv b/models/Scanrefer/data/scannet/meta_data/nyu40_labels.csv new file mode 100644 index 0000000..fc39f73 --- /dev/null +++ b/models/Scanrefer/data/scannet/meta_data/nyu40_labels.csv @@ -0,0 +1,41 @@ +nyu40id,nyu40class,mappedId,mappedIdConsecutive,weight +1,wall,(ignore),19,0.0 +2,floor,(ignore),19,0.0 +3,cabinet,3,1,3.9644974086960434 +4,bed,4,2,5.459494152836571 +5,chair,5,3,2.241522691584157 +6,sofa,6,4,4.820655512680854 +7,table,7,5,3.565918577548873 +8,door,8,6,3.538498341919445 +9,window,9,7,4.636521236560596 +10,bookshelf,10,8,5.445050937449535 +11,picture,11,9,5.079250281008131 +12,counter,12,10,6.2030429647735845 +13,blinds,(ignore),19,0.0 +14,desk,14,11,4.622662494840168 +15,shelves,(ignore),19,0.0 +16,curtain,16,12,5.956294301248057 +17,dresser,(ignore),19,0.0 +18,pillow,(ignore),19,0.0 +19,mirror,(ignore),19,0.0 +20,floor_mat,(ignore),19,0.0 +21,clothes,(ignore),19,0.0 +22,ceiling,(ignore),19,0.0 +23,books,(ignore),19,0.0 +24,refridgerator,24,13,5.459141107819665 +25,television,(ignore),19,0.0 +26,paper,(ignore),19,0.0 +27,towel,(ignore),19,0.0 +28,shower_curtain,28,14,6.724871661883906 +29,box,(ignore),19,0.0 +30,whiteboard,(ignore),19,0.0 +31,person,(ignore),19,0.0 +32,night_stand,(ignore),19,0.0 +33,toilet,33,15,5.832442848923174 +34,sink,34,16,5.064773947290611 +35,lamp,(ignore),19,0.0 +36,bathtub,36,17,6.738988357113375 +37,bag,(ignore),19,0.0 
+38,otherstructure,(ignore),19,0.0 +39,otherfurniture,39,18,3.375217918833916 +40,otherprop,(ignore),19,0.0 diff --git a/models/Scanrefer/data/scannet/meta_data/scannet_means.npz b/models/Scanrefer/data/scannet/meta_data/scannet_means.npz new file mode 100644 index 0000000..e57647c Binary files /dev/null and b/models/Scanrefer/data/scannet/meta_data/scannet_means.npz differ diff --git a/models/Scanrefer/data/scannet/meta_data/scannet_reference_means.npz b/models/Scanrefer/data/scannet/meta_data/scannet_reference_means.npz new file mode 100644 index 0000000..75b9cf2 Binary files /dev/null and b/models/Scanrefer/data/scannet/meta_data/scannet_reference_means.npz differ diff --git a/models/Scanrefer/data/scannet/meta_data/scannetv2-labels.combined.tsv b/models/Scanrefer/data/scannet/meta_data/scannetv2-labels.combined.tsv new file mode 100644 index 0000000..03ddbdc --- /dev/null +++ b/models/Scanrefer/data/scannet/meta_data/scannetv2-labels.combined.tsv @@ -0,0 +1,608 @@ +id raw_category category count nyu40id eigen13id nyuClass nyu40class eigen13class ModelNet40 ModelNet10 ShapeNetCore55 synsetoffset wnsynsetid wnsynsetkey mpcat40 mpcat40index +1 wall wall 8277 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +2 chair chair 4646 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +22 books book 1678 23 2 book books Books n02870526 book.n.11 objects 39 +3 floor floor 1553 2 5 floor floor Floor n03365592 floor.n.01 floor 2 +5 door door 1483 8 12 door door Wall door n03221720 door.n.01 door 4 +1163 object object 1313 40 7 otherprop Objects objects 39 +16 window window 1209 9 13 window window Window n04587648 window.n.01 window 9 +4 table table 1170 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +56 trash can trash can 1090 39 6 garbage bin otherfurniture Furniture trash_bin 2747177 n02747177 ashcan.n.01 objects 39 +13 pillow pillow 937 18 7 pillow pillow Objects pillow 3938244 n03938244 pillow.n.01 cushion 8 +15 picture picture 862 11 8 picture picture Picture n03931044 picture.n.01 picture 6 +41 ceiling ceiling 806 22 3 ceiling ceiling Ceiling n02990373 ceiling.n.01 ceiling 17 +26 box box 775 29 7 box box Objects n02883344 box.n.01 objects 39 +161 doorframe doorframe 768 8 12 door door Wall door doorframe.n.01 door 4 +19 monitor monitor 765 40 7 monitor otherprop Objects monitor monitor tv or monitor 3211117 n03782190 monitor.n.04 objects 39 +7 cabinet cabinet 731 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +9 desk desk 680 14 10 desk desk Table desk desk table 4379243 n03179701 desk.n.01 table 5 +8 shelf shelf 641 15 6 shelves shelves Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +10 office chair office chair 595 5 4 chair chair Chair chair chair chair 3001627 n04373704 swivel_chair.n.01 chair 3 +31 towel towel 570 27 7 towel towel Objects n04459362 towel.n.01 towel 20 +6 couch couch 502 6 9 sofa sofa Sofa sofa sofa sofa 4256520 n04256520 sofa.n.01 sofa 10 +14 sink sink 488 34 7 sink sink Objects sink n04223580 sink.n.01 sink 15 +48 backpack backpack 479 40 7 backpack otherprop Objects n02769748 backpack.n.01 objects 39 +28 lamp lamp 419 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +11 bed bed 370 4 1 bed bed Bed bed bed bed 2818832 n02818832 bed.n.01 bed 11 +18 bookshelf bookshelf 360 10 6 bookshelf bookshelf Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +71 mirror mirror 349 19 7 mirror mirror Objects n03773035 
mirror.n.01 mirror 21 +21 curtain curtain 347 16 13 curtain curtain Window curtain n03151077 curtain.n.01 curtain 12 +40 plant plant 331 40 7 plant otherprop Objects plant n00017222 plant.n.02 plant 14 +52 whiteboard whiteboard 327 30 7 whiteboard whiteboard Objects n03211616 display_panel.n.01 board_panel 35 +96 radiator radiator 322 39 6 radiator otherfurniture Furniture n04041069 radiator.n.02 misc 40 +22 book book 318 23 2 book books Books n02870526 book.n.11 objects 39 +29 kitchen cabinet kitchen cabinet 310 3 6 cabinet cabinet Furniture n02933112 cabinet.n.01 cabinet 7 +49 toilet paper toilet paper 291 40 7 toilet paper otherprop Objects n15075141 toilet_tissue.n.01 objects 39 +29 kitchen cabinets kitchen cabinet 289 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +23 armchair armchair 281 5 4 chair chair Chair chair chair chair 3001627 n02738535 armchair.n.01 chair 3 +63 shoes shoe 272 40 7 shoe otherprop Objects n04199027 shoe.n.01 clothes 38 +24 coffee table coffee table 258 7 10 coffee table table Table table table table 4379243 n03063968 coffee_table.n.01 table 5 +17 toilet toilet 256 33 7 toilet toilet Objects toilet toilet n04446276 toilet.n.01 toilet 18 +47 bag bag 252 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +32 clothes clothes 248 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +46 keyboard keyboard 246 40 7 keyboard otherprop Objects keyboard computer keyboard 3085013 n03085013 computer_keyboard.n.01 objects 39 +65 bottle bottle 226 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +97 recycling bin recycling bin 225 39 6 garbage bin otherfurniture Furniture trash_bin 2747177 n02747177 ashcan.n.01 objects 39 +34 nightstand nightstand 224 32 6 night stand night stand Furniture night_stand night_stand n03015254 chest_of_drawers.n.01 chest_of_drawers 13 +38 stool stool 221 40 7 stool otherprop Objects stool n04326896 stool.n.01 stool 19 +33 tv tv 219 25 11 television television TV tv or monitor 3211117 n03211117 display.n.06 tv_monitor 22 +75 file cabinet file cabinet 217 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +36 dresser dresser 213 17 6 dresser dresser Furniture dresser dresser n03015254 chest_of_drawers.n.01 chest_of_drawers 13 +64 computer tower computer tower 203 40 7 computer otherprop Objects n03082979 computer.n.01 objects 39 +32 clothing clothes 165 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +101 telephone telephone 164 40 7 telephone otherprop Objects telephone 4401088 n04401088 telephone.n.01 objects 39 +130 cup cup 157 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 +27 refrigerator refrigerator 154 24 6 refrigerator refrigerator Furniture n04070727 refrigerator.n.01 appliances 37 +44 end table end table 147 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +131 jacket jacket 146 40 7 jacket otherprop Objects n03589791 jacket.n.01 clothes 38 +55 shower curtain shower curtain 144 28 7 shower curtain shower curtain Objects curtain n04209239 shower_curtain.n.01 curtain 12 +42 bathtub bathtub 144 36 7 bathtub bathtub Objects bathtub bathtub tub 2808440 n02808440 bathtub.n.01 bathtub 25 +59 microwave microwave 141 40 7 microwave otherprop Objects microwave 3761084 n03761084 microwave.n.02 appliances 37 +159 kitchen counter kitchen counter 140 12 6 counter counter Furniture table table table 4379243 n03116530 counter.n.01 counter 26 +74 sofa 
chair sofa chair 129 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +82 paper towel dispenser paper towel dispenser 129 40 7 paper towel dispenser otherprop Objects objects 39 +1164 bathroom vanity bathroom vanity 126 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 table 5 +93 suitcase suitcase 118 40 7 luggage otherprop Objects n02773838 bag.n.06 objects 39 +77 laptop laptop 111 40 7 laptop otherprop Objects laptop laptop 3642806 n03642806 laptop.n.01 objects 39 +67 ottoman ottoman 111 39 6 ottoman otherfurniture Furniture stool n03380724 footstool.n.01 stool 19 +128 shower walls shower wall 109 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +50 printer printer 106 40 7 printer otherprop Objects printer 4004475 n04004475 printer.n.03 appliances 37 +35 counter counter 104 12 6 counter counter Furniture table table table 4379243 n03116530 counter.n.01 counter 26 +69 board board 100 38 7 board otherstructure Objects board_panel 35 +100 soap dispenser soap dispenser 99 40 7 otherprop Objects n04254120 soap_dispenser.n.01 objects 39 +62 stove stove 95 38 7 stove otherstructure Objects stove 4330267 n04330267 stove.n.02 appliances 37 +105 light light 93 38 7 light otherstructure Objects n03665366 light.n.02 lighting 28 +1165 closet wall closet wall 90 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +165 mini fridge mini fridge 87 24 6 refrigerator refrigerator Furniture n03273913 electric_refrigerator.n.01 appliances 37 +7 cabinets cabinet 79 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +5 doors door 76 8 12 door door Wall door n03221720 door.n.01 door 4 +76 fan fan 75 40 7 fan otherprop Objects n03320046 fan.n.01 misc 40 +230 tissue box tissue box 73 40 7 tissue box otherprop Objects n02883344 box.n.01 objects 39 +54 blanket blanket 72 40 7 blanket otherprop Objects n02849154 blanket.n.01 objects 39 +125 bathroom stall bathroom stall 71 38 7 otherstructure Objects n02873839 booth.n.02 misc 40 +72 copier copier 70 40 7 otherprop Objects n03257586 duplicator.n.01 appliances 37 +68 bench bench 66 39 6 bench otherfurniture Furniture bench bench 2828884 n02828884 bench.n.01 seating 34 +145 bar bar 66 38 7 bar otherstructure Objects n02788689 bar.n.03 misc 40 +157 soap dish soap dish 65 40 7 soap dish otherprop Objects n04254009 soap_dish.n.01 objects 39 +1166 laundry hamper laundry hamper 65 40 7 laundry basket otherprop Objects objects 39 +132 storage bin storage bin 63 40 7 storage bin otherprop Objects objects 39 +1167 bathroom stall door bathroom stall door 62 8 12 door door Wall door n03221720 door.n.01 door 4 +232 light switch light switch 61 38 7 light switch otherstructure Objects n04372370 switch.n.01 misc 40 +134 coffee maker coffee maker 61 40 7 otherprop Objects n03063338 coffee_maker.n.01 appliances 37 +51 tv stand tv stand 61 39 6 tv stand otherfurniture Furniture tv_stand n03290653 entertainment_center.n.01 furniture 36 +250 decoration decoration 60 40 7 otherprop Objects n03169390 decoration.n.01 misc 40 +1168 ceiling light ceiling light 59 38 7 light otherstructure Objects n03665366 light.n.02 lighting 28 +342 range hood range hood 59 38 7 range hood otherstructure Objects range_hood n04053677 range_hood.n.01 misc 40 +89 blackboard blackboard 58 38 7 blackboard otherstructure Objects n02846511 blackboard.n.01 board_panel 35 +103 clock clock 58 40 7 clock otherprop Objects clock 3046257 n03046257 clock.n.01 objects 39 +99 wardrobe closet wardrobe 54 39 6 wardrobe otherfurniture Furniture wardrobe 
n04550184 wardrobe.n.01 furniture 36 +95 rail rail 53 38 7 railing otherstructure Objects n04047401 railing.n.01 railing 30 +154 bulletin board bulletin board 53 38 7 board otherstructure Objects n03211616 display_panel.n.01 board_panel 35 +140 mat mat 52 20 5 floor mat floor mat Floor n03727837 mat.n.01 floor 2 +1169 trash bin trash bin 52 39 6 garbage bin otherfurniture Furniture trash_bin 2747177 n02747177 ashcan.n.01 objects 39 +193 ledge ledge 51 38 7 otherstructure Objects n09337253 ledge.n.01 misc 40 +116 seat seat 49 39 6 furniture otherfurniture Furniture n04161981 seat.n.03 furniture 36 +202 mouse mouse 49 40 7 mouse otherprop Objects n03793489 mouse.n.04 objects 39 +73 basket basket 48 40 7 basket otherprop Objects basket 2801938 n02801938 basket.n.01 objects 39 +78 shower shower 48 38 7 otherstructure Objects n04208936 shower.n.01 shower 23 +1170 dumbbell dumbbell 48 40 7 otherprop Objects n03255030 dumbbell.n.01 objects 39 +79 paper paper 46 26 7 paper paper Objects n14974264 paper.n.01 objects 39 +80 person person 46 31 7 person person Objects person n05217688 person.n.02 misc 40 +141 windowsill windowsill 45 38 7 otherstructure Objects n04590263 windowsill.n.01 window 9 +57 closet closet 45 39 6 wardrobe otherfurniture Furniture wardrobe misc 40 +102 bucket bucket 45 40 7 bucket otherprop Objects n02909870 bucket.n.01 misc 40 +261 sign sign 44 40 7 sign otherprop Objects n04217882 signboard.n.01 objects 39 +118 speaker speaker 43 40 7 speaker otherprop Objects speaker 3691459 n03691459 loudspeaker.n.01 objects 39 +136 dishwasher dishwasher 43 38 7 dishwasher otherstructure Objects dishwasher 3207941 n03207941 dishwasher.n.01 appliances 37 +98 container container 43 40 7 container otherprop Objects n03094503 container.n.01 objects 39 +1171 stair rail stair rail 42 38 7 banister otherstructure Objects n02788148 bannister.n.02 railing 30 +170 shower curtain rod shower curtain rod 42 40 7 otherprop Objects curtain 12 +1172 tube tube 41 40 7 otherprop Objects misc 40 +1173 bathroom cabinet bathroom cabinet 39 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +79 papers paper 39 26 7 paper paper Objects n14974264 paper.n.01 objects 39 +221 storage container storage container 39 40 7 container otherprop Objects objects 39 +570 paper bag paper bag 39 37 7 bag bag Objects n04122825 sack.n.01 objects 39 +138 paper towel roll paper towel roll 39 40 7 paper towel otherprop Objects n03887697 paper_towel.n.01 towel 20 +168 ball ball 39 40 7 ball otherprop Objects objects 39 +276 closet doors closet door 38 8 12 door door Wall door n03221720 door.n.01 door 4 +106 laundry basket laundry basket 37 40 7 laundry basket otherprop Objects basket 2801938 n03050864 clothes_hamper.n.01 objects 39 +214 cart cart 37 40 7 cart otherprop Objects n03484083 handcart.n.01 shelving 31 +276 closet door closet door 35 8 12 door door Wall door n03221720 door.n.01 door 4 +323 dish rack dish rack 35 40 7 dish rack otherprop Objects n03207630 dish_rack.n.01 objects 39 +58 stairs stairs 35 38 7 stairs otherstructure Objects n04298308 stairway.n.01 stairs 16 +86 blinds blinds 35 13 13 blinds blinds Window n02851099 blind.n.03 blinds 32 +2 stack of chairs chair 35 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +399 purse purse 34 40 7 purse otherprop Objects n02774152 bag.n.04 objects 39 +121 bicycle bicycle 33 40 7 bicycle otherprop Objects bicycle 2834778 n02834778 bicycle.n.01 objects 39 +185 tray tray 32 40 7 tray otherprop Objects n04476259 tray.n.01 
objects 39 +300 plunger plunger 30 40 7 otherprop Objects n03970156 plunger.n.03 objects 39 +180 paper cutter paper cutter 30 40 7 paper cutter otherprop Objects n03886940 paper_cutter.n.01 objects 39 +163 toilet paper dispenser toilet paper dispenser 29 40 7 otherprop Objects objects 39 +26 boxes box 29 29 7 box box Objects n02883344 box.n.01 objects 39 +66 bin bin 28 40 7 bin otherprop Objects n02839910 bin.n.01 objects 39 +208 toilet seat cover dispenser toilet seat cover dispenser 28 40 7 otherprop Objects objects 39 +112 guitar guitar 28 40 7 guitar otherprop Objects guitar guitar 3467517 n03467517 guitar.n.01 objects 39 +540 mailboxes mailbox 28 29 7 box box Objects mailbox 3710193 n03710193 mailbox.n.01 misc 40 +395 handicap bar handicap bar 27 38 7 bar otherstructure Objects misc 40 +166 fire extinguisher fire extinguisher 27 40 7 fire extinguisher otherprop Objects n03345837 fire_extinguisher.n.01 misc 40 +122 ladder ladder 27 39 6 ladder otherfurniture Furniture stairs n03632277 ladder.n.01 stairs 16 +120 column column 26 38 7 column otherstructure Objects n03074380 column.n.06 column 24 +107 pipe pipe 25 40 7 pipe otherprop Objects n03944672 pipe.n.02 misc 40 +283 vacuum cleaner vacuum cleaner 25 40 7 otherprop Objects n04517823 vacuum.n.04 objects 39 +88 plate plate 24 40 7 plate otherprop Objects n03959485 plate.n.04 objects 39 +90 piano piano 24 39 6 piano otherfurniture Furniture piano piano 3928116 n03928116 piano.n.01 furniture 36 +177 water cooler water cooler 24 39 6 water cooler otherfurniture Furniture n04559166 water_cooler.n.01 misc 40 +1174 cd case cd case 24 40 7 otherprop Objects objects 39 +562 bowl bowl 24 40 7 bowl otherprop Objects bowl bowl 2880940 n02880940 bowl.n.03 objects 39 +1175 closet rod closet rod 24 40 7 otherprop Objects n04100174 rod.n.01 misc 40 +1156 bathroom counter bathroom counter 24 12 6 counter counter Furniture table table table 4379243 n03116530 counter.n.01 counter 26 +84 oven oven 23 38 7 oven otherstructure Objects n03862676 oven.n.01 appliances 37 +104 stand stand 23 39 6 stand otherfurniture Furniture table table table 4379243 n04301000 stand.n.04 table 5 +229 scale scale 23 40 7 scale otherprop Objects n04141975 scale.n.07 objects 39 +70 washing machine washing machine 23 39 6 washing machine otherfurniture Furniture washing_machine 4554684 n04554684 washer.n.03 appliances 37 +325 broom broom 22 40 7 broom otherprop Objects n02906734 broom.n.01 objects 39 +169 hat hat 22 40 7 hat otherprop Objects n03497657 hat.n.01 clothes 38 +128 shower wall shower wall 22 1 12 wall wall Wall n04208936 shower.n.01 wall 1 +331 guitar case guitar case 21 40 7 guitar case otherprop Objects objects 39 +87 rack rack 21 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +488 water pitcher water pitcher 21 40 7 pitcher otherprop Objects n03950228 pitcher.n.02 objects 39 +776 laundry detergent laundry detergent 21 40 7 otherprop Objects objects 39 +370 hair dryer hair dryer 21 40 7 hair dryer otherprop Objects n03483316 hand_blower.n.01 objects 39 +191 pillar pillar 21 38 7 column otherstructure Objects n03073977 column.n.07 column 24 +748 divider divider 20 40 7 otherprop Objects wall 1 +242 power outlet power outlet 19 40 7 otherprop Objects misc 40 +45 dining table dining table 19 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +417 shower floor shower floor 19 2 5 floor floor Floor n04208936 shower.n.01 floor 2 +70 washing machines washing machine 19 39 6 washing machine otherfurniture Furniture 
washing_machine 4554684 n04554684 washer.n.03 appliances 37 +188 shower door shower door 19 8 12 door door Wall door n04208936 shower.n.01 door 4 +1176 coffee kettle coffee kettle 18 40 7 pot otherprop Objects n03612814 kettle.n.01 objects 39 +1177 wardrobe cabinet wardrobe 18 39 6 wardrobe otherfurniture Furniture wardrobe n04550184 wardrobe.n.01 furniture 36 +1178 structure structure 18 38 7 otherstructure Objects misc 40 +18 bookshelves bookshelf 17 10 6 bookshelf bookshelf Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +110 clothes dryer clothes dryer 17 39 6 otherfurniture Furniture n03251766 dryer.n.01 appliances 37 +148 toaster toaster 17 40 7 toaster otherprop Objects n04442312 toaster.n.02 appliances 37 +63 shoe shoe 17 40 7 shoe otherprop Objects n04199027 shoe.n.01 clothes 38 +155 ironing board ironing board 16 39 6 ironing board otherfurniture Furniture n03586090 ironing_board.n.01 objects 39 +572 alarm clock alarm clock 16 40 7 alarm clock otherprop Objects clock 3046257 n02694662 alarm_clock.n.01 objects 39 +1179 shower head shower head 15 38 7 otherstructure Objects shower 23 +28 lamp base lamp 15 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +392 water bottle water bottle 15 40 7 bottle otherprop Objects bottle bottle 2876657 n04557648 water_bottle.n.01 objects 39 +1180 keyboard piano keyboard piano 15 39 6 piano otherfurniture Furniture piano piano 3928116 n03928116 piano.n.01 furniture 36 +609 projector screen projector screen 15 38 7 projector screen otherstructure Objects misc 40 +1181 case of water bottles case of water bottles 15 40 7 otherprop Objects objects 39 +195 toaster oven toaster oven 14 40 7 toaster oven otherprop Objects n04442441 toaster_oven.n.01 appliances 37 +581 music stand music stand 14 39 6 music stand otherfurniture Furniture n03801760 music_stand.n.01 furniture 36 +58 staircase stairs 14 38 7 stairs otherstructure Objects n04298308 stairway.n.01 stairs 16 +1182 coat rack coat rack 14 40 7 otherprop Objects n03059103 coatrack.n.01 shelving 3 +1183 storage organizer storage organizer 14 40 7 otherprop Objects shelving 3 +139 machine machine 14 40 7 machine otherprop Objects n03699975 machine.n.01 appliances 37 +1184 folded chair folded chair 14 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +1185 fire alarm fire alarm 14 40 7 otherprop Objects n03343737 fire_alarm.n.02 misc 40 +156 fireplace fireplace 13 38 7 fireplace otherstructure Objects n03346455 fireplace.n.01 fireplace 27 +408 vent vent 13 40 7 otherprop Objects n04526241 vent.n.01 misc 40 +213 furniture furniture 13 39 6 furniture otherfurniture Furniture n03405725 furniture.n.01 furniture 36 +1186 power strip power strip 13 40 7 otherprop Objects objects 39 +1187 calendar calendar 13 40 7 otherprop Objects objects 39 +1188 poster poster 13 11 8 picture picture Picture n03931044 picture.n.01 picture 6 +115 toilet paper holder toilet paper holder 13 40 7 toilet paper holder otherprop Objects objects 39 +1189 potted plant potted plant 12 40 7 plant otherprop Objects plant n00017222 plant.n.02 plant 14 +304 stuffed animal stuffed animal 12 40 7 stuffed animal otherprop Objects n04399382 teddy.n.01 objects 39 +1190 luggage luggage 12 40 7 luggage otherprop Objects n02774630 baggage.n.01 objects 39 +21 curtains curtain 12 16 13 curtain curtain Window curtain n03151077 curtain.n.01 curtain 12 +312 headphones headphones 12 40 7 otherprop Objects n03261776 earphone.n.01 objects 39 +233 crate crate 12 39 6 crate 
otherfurniture Furniture n03127925 crate.n.01 objects 39 +286 candle candle 12 40 7 candle otherprop Objects lamp n02948072 candle.n.01 objects 39 +264 projector projector 12 40 7 projector otherprop Objects n04009552 projector.n.02 objects 39 +110 clothes dryers clothes dryer 12 39 6 otherfurniture Furniture n03251766 dryer.n.01 appliances 37 +1191 mattress mattress 12 4 1 bed bed Bed bed bed bed 2818832 n02818832 bed.n.01 bed 11 +356 dustpan dustpan 12 40 7 otherprop Objects n03259009 dustpan.n.02 objects 39 +25 drawer drawer 11 39 6 drawer otherfurniture Furniture n03233905 drawer.n.01 furniture 36 +750 rod rod 11 40 7 otherprop Objects pistol 3948459 n03427202 gat.n.01 misc 40 +269 globe globe 11 40 7 globe otherprop Objects objects 39 +307 footrest footrest 11 39 6 foot rest otherfurniture Furniture stool n03380724 footstool.n.01 stool 19 +410 piano bench piano bench 11 39 6 piano bench otherfurniture Furniture bench bench 2828884 n02828884 bench.n.01 seating 34 +730 breakfast bar breakfast bar 11 38 7 bar otherstructure Objects counter 26 +216 step stool step stool 11 40 7 step stool otherprop Objects stool n04315713 step_stool.n.01 stool 19 +1192 hand rail hand rail 11 38 7 railing otherstructure Objects railing 30 +119 vending machine vending machine 11 40 7 machine otherprop Objects n04525305 vending_machine.n.01 appliances 37 +682 ceiling fan ceiling fan 11 40 7 fan otherprop Objects n03320046 fan.n.01 misc 40 +434 swiffer swiffer 11 40 7 otherprop Objects objects 39 +126 foosball table foosball table 11 39 6 foosball table otherfurniture Furniture table table table 4379243 n04379243 table.n.02 table 5 +919 jar jar 11 40 7 jar otherprop Objects jar 3593526 n03593526 jar.n.01 objects 39 +85 footstool footstool 11 39 6 ottoman otherfurniture Furniture stool n03380724 footstool.n.01 stool 19 +1193 folded table folded table 10 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +108 round table round table 10 7 10 table table Table table table table 4379243 n04114554 round_table.n.02 table 5 +135 hamper hamper 10 40 7 basket otherprop Objects basket 2801938 n03482405 hamper.n.02 objects 39 +1194 poster tube poster tube 10 40 7 otherprop Objects objects 39 +432 case case 10 40 7 case otherprop Objects objects 39 +53 carpet carpet 10 40 7 rug otherprop Objects n04118021 rug.n.01 floor 2 +1195 thermostat thermostat 10 40 7 otherprop Objects n04422875 thermostat.n.01 misc 40 +111 coat coat 10 40 7 jacket otherprop Objects n03057021 coat.n.01 clothes 38 +305 water fountain water fountain 10 38 7 water fountain otherstructure Objects n03241335 drinking_fountain.n.01 misc 40 +1125 smoke detector smoke detector 10 40 7 otherprop Objects misc 40 +13 pillows pillow 9 18 7 pillow pillow Objects pillow 3938244 n03938244 pillow.n.01 cushion 8 +1196 flip flops flip flops 9 40 7 shoe otherprop Objects n04199027 shoe.n.01 clothes 38 +1197 cloth cloth 9 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +1198 banner banner 9 40 7 otherprop Objects n02788021 banner.n.01 misc 40 +1199 clothes hanger clothes hanger 9 40 7 otherprop Objects n03057920 coat_hanger.n.01 objects 39 +1200 whiteboard eraser whiteboard eraser 9 40 7 otherprop Objects objects 39 +378 iron iron 9 40 7 otherprop Objects n03584829 iron.n.04 objects 39 +591 instrument case instrument case 9 40 7 case otherprop Objects objects 39 +49 toilet paper rolls toilet paper 9 40 7 toilet paper otherprop Objects n15075141 toilet_tissue.n.01 objects 39 +92 soap soap 9 40 7 soap otherprop Objects n04253437 
soap.n.01 objects 39 +1098 block block 9 40 7 otherprop Objects misc 40 +291 wall hanging wall hanging 8 40 7 otherprop Objects n03491178 hanging.n.01 picture 6 +1063 kitchen island kitchen island 8 38 7 kitchen island otherstructure Objects n03620600 kitchen_island.n.01 counter 26 +107 pipes pipe 8 38 7 otherstructure Objects misc 40 +1135 toothbrush toothbrush 8 40 7 toothbrush otherprop Objects n04453156 toothbrush.n.01 objects 39 +189 shirt shirt 8 40 7 otherprop Objects n04197391 shirt.n.01 clothes 38 +245 cutting board cutting board 8 40 7 cutting board otherprop Objects n03025513 chopping_board.n.01 objects 39 +194 vase vase 8 40 7 vase otherprop Objects vase jar 3593526 n04522168 vase.n.01 objects 39 +1201 shower control valve shower control valve 8 38 7 otherstructure Objects n04208936 shower.n.01 shower 23 +386 exercise machine exercise machine 8 40 7 machine otherprop Objects gym_equipment 33 +1202 compost bin compost bin 8 39 6 garbage bin otherfurniture Furniture trash_bin 2747177 n02747177 ashcan.n.01 objects 39 +857 shorts shorts 8 40 7 shorts otherprop Objects clothes 38 +452 tire tire 8 40 7 otherprop Objects n04440749 tire.n.01 objects 39 +1203 teddy bear teddy bear 7 40 7 stuffed animal otherprop Objects n04399382 teddy.n.01 objects 39 +346 bathrobe bathrobe 7 40 7 otherprop Objects n02807616 bathrobe.n.01 clothes 38 +152 handrail handrail 7 38 7 railing otherstructure Objects n02788148 bannister.n.02 railing 30 +83 faucet faucet 7 40 7 faucet otherprop Objects faucet 3325088 n03325088 faucet.n.01 misc 40 +1204 pantry wall pantry wall 7 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +726 thermos thermos 7 40 7 flask otherprop Objects bottle bottle 2876657 n04422727 thermos.n.01 objects 39 +61 rug rug 7 40 7 rug otherprop Objects n04118021 rug.n.01 floor 2 +39 couch cushions cushion 7 18 7 pillow pillow Objects n03151500 cushion.n.03 cushion 8 +1117 tripod tripod 7 39 6 stand otherfurniture Furniture n04485082 tripod.n.01 objects 39 +540 mailbox mailbox 7 29 7 box box Objects mailbox 3710193 n03710193 mailbox.n.01 misc 40 +1205 tupperware tupperware 7 40 7 otherprop Objects objects 39 +415 shoe rack shoe rack 7 40 7 shoe rack otherprop Objects shelving 31 +31 towels towel 6 27 7 towel towel Objects n04459362 towel.n.01 towel 20 +1206 beer bottles beer bottle 6 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +153 treadmill treadmill 6 39 6 treadmill otherfurniture Furniture n04477387 treadmill.n.01 gym_equipment 33 +1207 salt salt 6 40 7 otherprop Objects objects 39 +129 chest chest 6 39 6 chest otherfurniture Furniture dresser dresser chest_of_drawers 13 +220 dispenser dispenser 6 40 7 otherprop Objects n03210683 dispenser.n.01 objects 39 +1208 mirror doors mirror door 6 8 12 door door Wall door n03221720 door.n.01 door 4 +231 remote remote 6 40 7 otherprop Objects remote_control 4074963 n04074963 remote_control.n.01 objects 39 +1209 folded ladder folded ladder 6 39 6 ladder otherfurniture Furniture stairs n03632277 ladder.n.01 misc 40 +39 cushion cushion 6 18 7 pillow pillow Objects n03151500 cushion.n.03 cushion 8 +1210 carton carton 6 40 7 otherprop Objects objects 39 +117 step step 6 38 7 otherstructure Objects n04314914 step.n.04 misc 40 +822 drying rack drying rack 6 39 6 drying rack otherfurniture Furniture shelving 31 +238 slippers slipper 6 40 7 shoe otherprop Objects n04241394 slipper.n.01 clothes 38 +143 pool table pool table 6 39 6 pool table otherfurniture Furniture table table table 4379243 n03982430 pool_table.n.01 table 
5 +1211 soda stream soda stream 6 40 7 otherprop Objects objects 39 +228 toilet brush toilet brush 6 40 7 toilet brush otherprop Objects objects 39 +494 loft bed loft bed 6 4 1 bed bed Bed bed bed bed 2818832 n02818832 bed.n.01 bed 11 +226 cooking pot cooking pot 6 40 7 pot otherprop Objects objects 39 +91 heater heater 6 39 6 heater otherfurniture Furniture n03508101 heater.n.01 misc 40 +1072 messenger bag messenger bag 6 37 7 bag bag Objects objects 39 +435 stapler stapler 6 40 7 stapler otherprop Objects n04303497 stapler.n.01 objects 39 +1165 closet walls closet wall 5 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +345 scanner scanner 5 40 7 otherprop Objects appliances 37 +893 elliptical machine elliptical machine 5 40 7 machine otherprop Objects gym_equipment 33 +621 kettle kettle 5 40 7 pot otherprop Objects n03612814 kettle.n.01 objects 39 +1212 metronome metronome 5 40 7 otherprop Objects n03757604 metronome.n.01 objects 39 +297 dumbell dumbell 5 40 7 otherprop Objects objects 39 +1213 music book music book 5 23 2 book books Books n02870526 book.n.11 objects 39 +1214 rice cooker rice cooker 5 40 7 otherprop Objects objects 39 +1215 dart board dart board 5 38 7 board otherstructure Objects n03162940 dartboard.n.01 objects 39 +529 sewing machine sewing machine 5 40 7 sewing machine otherprop Objects n04179913 sewing_machine.n.01 objects 39 +1216 grab bar grab bar 5 38 7 railing otherstructure Objects railing 30 +1217 flowerpot flowerpot 5 40 7 vase otherprop Objects vase jar 3593526 n04522168 vase.n.01 objects 39 +1218 painting painting 5 11 8 picture picture Picture n03931044 picture.n.01 picture 6 +1219 railing railing 5 38 7 railing otherstructure Objects n04047401 railing.n.01 railing 30 +1220 stair stair 5 38 7 stairs otherstructure Objects stairs n04314914 step.n.04 stairs 16 +525 toolbox toolbox 5 39 6 chest otherfurniture Furniture n04452615 toolbox.n.01 objects 39 +204 nerf gun nerf gun 5 40 7 otherprop Objects objects 39 +693 binders binder 5 40 7 binder otherprop Objects objects 39 +179 desk lamp desk lamp 5 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +1221 quadcopter quadcopter 5 40 7 otherprop Objects objects 39 +1222 pitcher pitcher 5 40 7 pitcher otherprop Objects n03950228 pitcher.n.02 objects 39 +1223 hanging hanging 5 40 7 otherprop Objects misc 40 +1224 mail mail 5 40 7 otherprop Objects misc 40 +1225 closet ceiling closet ceiling 5 22 3 ceiling ceiling Ceiling n02990373 ceiling.n.01 ceiling 17 +1226 hoverboard hoverboard 5 40 7 otherprop Objects objects 39 +1227 beanbag chair beanbag chair 5 39 6 bean bag otherfurniture Furniture n02816656 beanbag.n.01 chair 3 +571 water heater water heater 5 40 7 water heater otherprop Objects n04560113 water_heater.n.01 misc 40 +1228 spray bottle spray bottle 5 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +556 rope rope 5 40 7 rope otherprop Objects n04108268 rope.n.01 objects 39 +280 plastic container plastic container 5 40 7 container otherprop Objects objects 39 +1229 soap bottle soap bottle 5 40 7 soap otherprop Objects objects 39 +1230 ikea bag ikea bag 4 37 7 bag bag Objects 2773838 n02773838 bag.n.06 objects 39 +1231 sleeping bag sleeping bag 4 40 7 otherprop Objects n04235860 sleeping_bag.n.01 objects 39 +1232 duffel bag duffel bag 4 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +746 frying pan frying pan 4 40 7 frying pan otherprop Objects n03400231 frying_pan.n.01 objects 39 +1233 oven mitt oven mitt 4 40 7 otherprop Objects 
objects 39 +1234 pot pot 4 40 7 pot otherprop Objects n04235860 sleeping_bag.n.01 objects 39 +144 hand dryer hand dryer 4 40 7 otherprop Objects objects 39 +282 dollhouse dollhouse 4 39 6 doll house otherfurniture Furniture n03219483 dollhouse.n.01 objects 39 +167 shampoo bottle shampoo bottle 4 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +1235 hair brush hair brush 4 40 7 otherprop Objects n02908217 brush.n.02 objects 39 +1236 tennis racket tennis racket 4 40 7 otherprop Objects n04409806 tennis_racket.n.01 objects 39 +1237 display case display case 4 40 7 case otherprop Objects objects 39 +234 ping pong table ping pong table 4 39 6 ping pong table otherfurniture Furniture table table table 4379243 n04379243 table.n.02 table 5 +563 boiler boiler 4 40 7 otherprop Objects misc 40 +1238 bag of coffee beans bag of coffee beans 4 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +1239 bananas banana 4 40 7 otherprop Objects n00021265 food.n.01 objects 39 +1240 carseat carseat 4 40 7 otherprop Objects misc 40 +366 helmet helmet 4 40 7 otherprop Objects helmet 3513137 n03513137 helmet.n.02 clothes 38 +816 umbrella umbrella 4 40 7 umbrella otherprop Objects n04507155 umbrella.n.01 objects 39 +1241 coffee box coffee box 4 40 7 otherprop Objects objects 39 +719 envelope envelope 4 40 7 envelope otherprop Objects n03291819 envelope.n.01 objects 39 +284 wet floor sign wet floor sign 4 40 7 sign otherprop Objects misc 40 +1242 clothing rack clothing rack 4 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +247 controller controller 4 40 7 otherprop Objects n03096960 control.n.09 objects 39 +1243 bath walls bathroom wall 4 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +1244 podium podium 4 39 6 otherfurniture Furniture n03159640 dais.n.01 furniture 36 +1245 storage box storage box 4 29 7 box box Objects n02883344 box.n.01 objects 39 +1246 dolly dolly 4 40 7 otherprop Objects misc 40 +1247 shampoo shampoo 3 40 7 otherprop Objects n04183516 shampoo.n.01 objects 39 +592 paper tray paper tray 3 40 7 paper tray otherprop Objects objects 39 +385 cabinet door cabinet door 3 8 12 door door Wall door door 4 +1248 changing station changing station 3 40 7 otherprop Objects misc 40 +1249 poster printer poster printer 3 40 7 printer otherprop Objects printer 4004475 n04004475 printer.n.03 appliances 37 +133 screen screen 3 40 7 otherprop Objects n03151077 curtain.n.01 curtain 12 +301 soap bar soap bar 3 38 7 bar otherstructure Objects objects 39 +1250 crutches crutches 3 40 7 otherprop Objects n03141823 crutch.n.01 objects 39 +379 studio light studio light 3 38 7 light otherstructure Objects lighting 28 +130 stack of cups cup 3 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 +1251 toilet flush button toilet flush button 3 40 7 otherprop Objects objects 39 +450 trunk trunk 3 40 7 otherprop Objects misc 40 +1252 grocery bag grocery bag 3 37 7 bag bag Objects suitcase 2773838 n03461288 grocery_bag.n.01 objects 39 +316 plastic bin plastic bin 3 40 7 bin otherprop Objects objects 39 +1253 pizza box pizza box 3 29 7 box box Objects objects 39 +385 cabinet doors cabinet door 3 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 door 4 +1254 legs legs 3 31 7 person person Objects person n05217688 person.n.02 misc 40 +461 car car 3 40 7 car otherprop Objects car car 2958343 n02958343 car.n.01 misc 40 +1255 shaving cream shaving cream 3 40 7 otherprop Objects n04186051 shaving_cream.n.01 objects 39 
+1256 luggage stand luggage stand 3 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +599 shredder shredder 3 40 7 otherprop Objects n04210120 shredder.n.01 objects 39 +281 statue statue 3 40 7 sculpture otherprop Objects n04306847 statue.n.01 misc 40 +1257 urinal urinal 3 33 7 toilet toilet Objects toilet toilet n04515991 urinal.n.01 toilet 18 +1258 hose hose 3 40 7 otherprop Objects n03539875 hose.n.03 misc 40 +1259 bike pump bike pump 3 40 7 otherprop Objects objects 39 +319 coatrack coatrack 3 40 7 otherprop Objects n03059103 coatrack.n.01 shelving 31 +1260 bear bear 3 40 7 otherprop Objects objects 39 +28 wall lamp lamp 3 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +1261 humidifier humidifier 3 40 7 otherprop Objects objects 39 +546 toothpaste toothpaste 3 40 7 toothpaste otherprop Objects objects 39 +1262 mouthwash bottle mouthwash bottle 3 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +1263 poster cutter poster cutter 3 40 7 otherprop Objects objects 39 +1264 golf bag golf bag 3 37 7 bag bag Objects suitcase 2773838 n03445617 golf_bag.n.01 objects 39 +1265 food container food container 3 40 7 container otherprop Objects n03094503 container.n.01 objects 39 +1266 camera camera 3 40 7 otherprop Objects objects 39 +28 table lamp lamp 3 35 7 lamp lamp Objects lamp lamp 3636649 n04380533 table_lamp.n.01 lighting 28 +1267 yoga mat yoga mat 3 20 5 floor mat floor mat Floor n03727837 mat.n.01 floor 2 +1268 card card 3 40 7 otherprop Objects objects 39 +1269 mug mug 3 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 +188 shower doors shower door 3 38 7 otherstructure Objects n04208936 shower.n.01 door 4 +689 cardboard cardboard 3 40 7 otherprop Objects objects 39 +1270 rack stand rack stand 3 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +1271 boxes of paper boxes of paper 3 29 7 box box Objects n02883344 box.n.01 objects 39 +1272 flag flag 3 40 7 otherprop Objects misc 40 +354 futon futon 3 39 6 mattress otherfurniture Furniture n03408444 futon.n.01 sofa 10 +339 magazine magazine 3 40 7 magazine otherprop Objects n06595351 magazine.n.01 objects 39 +1009 exit sign exit sign 3 40 7 exit sign otherprop Objects misc 40 +1273 rolled poster rolled poster 3 40 7 otherprop Objects objects 39 +1274 wheel wheel 3 40 7 otherprop Objects objects 39 +15 pictures picture 3 11 8 picture picture Picture n03931044 picture.n.01 picture 6 +1275 blackboard eraser blackboard eraser 3 40 7 eraser otherprop Objects n03294833 eraser.n.01 objects 39 +361 organizer organizer 3 40 7 otherprop Objects n03918737 personal_digital_assistant.n.01 objects 39 +1276 doll doll 3 40 7 toy otherprop Objects n03219135 doll.n.01 objects 39 +326 book rack book rack 3 39 6 bookrack otherfurniture Furniture objects 39 +1277 laundry bag laundry bag 3 40 7 laundry basket otherprop Objects basket 2801938 n03050864 clothes_hamper.n.01 objects 39 +1278 sponge sponge 3 40 7 otherprop Objects n01906749 sponge.n.04 objects 39 +116 seating seat 3 39 6 furniture otherfurniture Furniture n04161981 seat.n.03 furniture 36 +1184 folded chairs folded chair 2 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +1279 lotion bottle lotion bottle 2 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +212 can can 2 40 7 can otherprop Objects can 2946921 n02946921 can.n.01 objects 39 +1280 lunch box lunch box 2 40 7 otherprop Objects objects 39 +1281 food 
display food display 2 40 7 otherprop Objects misc 40 +794 storage shelf storage shelf 2 40 7 otherprop Objects shelving 31 +1282 sliding wood door sliding wood door 2 40 7 otherprop Objects door 4 +955 pants pants 2 40 7 otherprop Objects n04489008 trouser.n.01 clothes 38 +387 wood wood 2 40 7 otherprop Objects misc 40 +69 boards board 2 38 7 board otherstructure Objects board_panel 35 +65 bottles bottle 2 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +523 washcloth washcloth 2 40 7 otherprop Objects n04554523 washcloth.n.01 towel 20 +389 workbench workbench 2 39 6 bench otherfurniture Furniture bench table 4379243 n04600486 workbench.n.01 table 5 +29 open kitchen cabinet kitchen cabinet 2 3 6 cabinet cabinet Furniture n02933112 cabinet.n.01 cabinet 7 +1283 organizer shelf organizer shelf 2 15 6 shelves shelves Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +146 frame frame 2 38 7 otherstructure Objects misc 40 +130 cups cup 2 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 +372 exercise ball exercise ball 2 40 7 ball otherprop Objects n04285146 sports_equipment.n.01 gym_equipment 33 +289 easel easel 2 39 6 stand otherfurniture Furniture n03262809 easel.n.01 furniture 36 +440 garbage bag garbage bag 2 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +321 roomba roomba 2 40 7 otherprop Objects objects 39 +976 garage door garage door 2 38 7 garage door otherstructure Objects door door 4 +1256 luggage rack luggage stand 2 39 6 stand otherfurniture Furniture n04038440 shelving 31 +1284 bike lock bike lock 2 40 7 otherprop Objects objects 39 +1285 briefcase briefcase 2 40 7 otherprop Objects n02900705 briefcase.n.01 objects 39 +357 hand towel hand towel 2 27 7 towel towel Objects n03490006 hand_towel.n.01 towel 20 +1286 bath products bath product 2 40 7 otherprop Objects objects 39 +1287 star star 2 40 7 otherprop Objects n09444783 star.n.03 misc 40 +365 map map 2 40 7 map otherprop Objects n03720163 map.n.01 misc 40 +1288 coffee bean bag coffee bean bag 2 37 7 bag bag Objects suitcase 2773838 n02773838 bag.n.06 objects 39 +81 headboard headboard 2 39 6 headboard otherfurniture Furniture n03502200 headboard.n.01 bed 11 +1289 ipad ipad 2 40 7 otherprop Objects objects 39 +1290 display rack display rack 2 39 6 stand otherfurniture Furniture n04038440 rack.n.05 shelving 31 +948 traffic cone traffic cone 2 40 7 cone otherprop Objects cone objects 39 +174 toiletry toiletry 2 40 7 otherprop Objects n04447443 toiletry.n.01 objects 39 +1028 canopy canopy 2 40 7 otherprop Objects misc 40 +1291 massage chair massage chair 2 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +1292 paper organizer paper organizer 2 40 7 otherprop Objects objects 39 +1005 barricade barricade 2 40 7 otherprop Objects misc 40 +235 platform platform 2 38 7 otherstructure Objects misc 40 +1293 cap cap 2 40 7 hat otherprop Objects n03497657 hat.n.01 clothes 38 +1294 dumbbell plates dumbbell plates 2 40 7 otherprop Objects objects 39 +1295 elevator elevator 2 38 7 otherstructure Objects misc 40 +1296 cooking pan cooking pan 2 40 7 pan otherprop Objects n03880531 pan.n.01 objects 39 +1297 trash bag trash bag 2 37 7 bag bag Objects objects 39 +1298 santa santa 2 40 7 otherprop Objects misc 40 +1299 jewelry box jewelry box 2 29 7 box box Objects n02883344 box.n.01 objects 39 +1300 boat boat 2 40 7 otherprop Objects misc 40 +1301 sock sock 2 21 7 clothes clothes Objects n04254777 sock.n.01 
clothes 38 +1051 kinect kinect 2 40 7 kinect otherprop Objects objects 39 +566 crib crib 2 39 6 crib otherfurniture Furniture furniture 36 +1302 plastic storage bin plastic storage bin 2 40 7 container otherprop Objects n03094503 container.n.01 objects 39 +1062 cooler cooler 2 24 6 refrigerator refrigerator Furniture n03102654 cooler.n.01 appliances 37 +1303 kitchen apron kitchen apron 2 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +1304 dishwashing soap bottle dishwashing soap bottle 2 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +1305 xbox controller xbox controller 2 40 7 otherprop Objects objects 39 +1306 banana holder banana holder 2 40 7 otherprop Objects objects 39 +298 ping pong paddle ping pong paddle 2 40 7 otherprop Objects table 5 +1307 airplane airplane 2 40 7 otherprop Objects misc 40 +1308 conditioner bottle conditioner bottle 2 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +1309 tea kettle tea kettle 2 40 7 tea kettle otherprop Objects n04397768 teakettle.n.01 objects 39 +43 bedframe bedframe 2 39 6 otherfurniture Furniture n02822579 bedstead.n.01 bed 11 +1310 wood beam wood beam 2 38 7 otherstructure Objects beam 29 +593 toilet paper package toilet paper package 2 40 7 otherprop Objects objects 39 +1311 wall mounted coat rack wall mounted coat rack 2 40 7 otherprop Objects n03059103 coatrack.n.01 shelving 31 +1312 film light film light 2 40 7 otherprop Objects lighting 28 +749 ceiling lamp ceiling lamp 1 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +623 chain chain 1 40 7 otherprop Objects chair 3 +1313 sofa sofa 1 6 9 sofa sofa Sofa sofa sofa sofa 4256520 n04256520 sofa.n.01 sofa 10 +99 closet wardrobe wardrobe 1 39 6 wardrobe otherfurniture Furniture wardrobe n04550184 wardrobe.n.01 furniture 36 +265 sweater sweater 1 40 7 otherprop Objects n04370048 sweater.n.01 clothes 38 +1314 kitchen mixer kitchen mixer 1 40 7 otherprop Objects appliances 37 +99 wardrobe wardrobe 1 39 6 wardrobe otherfurniture Furniture wardrobe n04550184 wardrobe.n.01 furniture 36 +1315 water softener water softener 1 40 7 otherprop Objects misc 40 +448 banister banister 1 38 7 banister otherstructure Objects n02788148 bannister.n.02 railing 30 +257 trolley trolley 1 40 7 trolley otherprop Objects n04335435 streetcar.n.01 misc 40 +1316 pantry shelf pantry shelf 1 15 6 shelves shelves Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +786 sofa bed sofa bed 1 4 1 bed bed Bed bed bed bed 2818832 n02818832 bed.n.01 bed 11 +801 loofa loofa 1 40 7 otherprop Objects objects 39 +972 shower faucet handle shower faucet handle 1 40 7 handle otherprop Objects shower 23 +1317 toy piano toy piano 1 40 7 toy otherprop Objects n03964744 plaything.n.01 objects 39 +1318 fish fish 1 40 7 otherprop Objects n02512053 fish.n.01 objects 39 +75 file cabinets file cabinet 1 3 6 cabinet cabinet Furniture cabinet 2933112 n03337140 file.n.03 cabinet 7 +657 cat litter box cat litter box 1 29 7 box box Objects objects 39 +561 electric panel electric panel 1 40 7 otherprop Objects misc 40 +93 suitcases suitcase 1 40 7 luggage otherprop Objects n02774630 baggage.n.01 objects 39 +513 curtain rod curtain rod 1 38 7 curtain rod otherstructure Objects curtain 12 +411 bunk bed bunk bed 1 39 6 bunk bed otherfurniture Furniture bed bed bed 2818832 n02920259 bunk_bed.n.01 bed 11 +1122 chandelier chandelier 1 38 7 chandelier otherstructure Objects n03005285 chandelier.n.01 lighting 28 +922 
tape tape 1 40 7 tape otherprop Objects objects 39 +88 plates plate 1 40 7 otherprop Objects n03959485 plate.n.04 objects 39 +518 alarm alarm 1 40 7 alarm otherprop Objects clock 3046257 n02694662 alarm_clock.n.01 objects 39 +814 fire hose fire hose 1 40 7 otherprop Objects n03346004 fire_hose.n.01 misc 40 +1319 toy dinosaur toy dinosaur 1 40 7 toy otherprop Objects n03964744 plaything.n.01 objects 39 +1320 cone cone 1 40 7 otherprop Objects objects 39 +649 glass doors glass door 1 8 12 door door Wall door n03221720 door.n.01 door 4 +607 hatrack hatrack 1 40 7 otherprop Objects n03059103 coatrack.n.01 shelving 31 +819 subwoofer subwoofer 1 40 7 speaker otherprop Objects speaker 3691459 n04349401 subwoofer.n.01 objects 39 +1321 fire sprinkler fire sprinkler 1 40 7 otherprop Objects misc 40 +1322 trash cabinet trash cabinet 1 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +1204 pantry walls pantry wall 1 1 12 wall wall Wall n04546855 wall.n.01 wall 1 +227 photo photo 1 40 7 photo otherprop Objects n03925226 photograph.n.01 picture 6 +817 barrier barrier 1 40 7 otherprop Objects n02796623 barrier.n.01 misc 40 +130 stacks of cups cup 1 40 7 otherprop Objects n03147509 cup.n.01 objects 39 +712 beachball beachball 1 40 7 ball otherprop Objects n02814224 beach_ball.n.01 objects 39 +1323 folded boxes folded boxes 1 40 7 otherprop Objects objects 39 +1324 contact lens solution bottle contact lens solution bottle 1 40 7 bottle otherprop Objects bottle bottle 2876657 n02876657 bottle.n.01 objects 39 +673 covered box covered box 1 29 7 box box Objects objects 39 +459 folder folder 1 40 7 folder otherprop Objects n03376279 folder.n.02 objects 39 +643 mail trays mail tray 1 40 7 mail tray otherprop Objects objects 39 +238 slipper slipper 1 40 7 otherprop Objects n04241394 slipper.n.01 clothes 38 +765 magazine rack magazine rack 1 39 6 stand otherfurniture Furniture n03704549 magazine_rack.n.01 shelving 31 +1008 sticker sticker 1 40 7 sticker otherprop Objects n07272545 gummed_label.n.01 objects 39 +225 lotion lotion 1 40 7 otherprop Objects n03690938 lotion.n.01 objects 39 +1083 buddha buddha 1 40 7 otherprop Objects objects 39 +813 file organizer file organizer 1 40 7 otherprop Objects objects 39 +138 paper towel rolls paper towel roll 1 40 7 paper towel otherprop Objects n03887697 paper_towel.n.01 towel 20 +1145 night lamp night lamp 1 35 7 lamp lamp Objects lamp lamp 3636649 n03636649 lamp.n.02 lighting 28 +796 fuse box fuse box 1 40 7 otherprop Objects misc 40 +1325 knife block knife block 1 40 7 otherprop Objects objects 39 +363 furnace furnace 1 39 6 furnace otherfurniture Furniture n03404449 furnace.n.01 +1174 cd cases cd case 1 40 7 otherprop Objects objects 39 +38 stools stool 1 40 7 stool otherprop Objects stool n04326896 stool.n.01 stool 19 +1326 hand sanitzer dispenser hand sanitzer dispenser 1 40 7 otherprop Objects n04254120 soap_dispenser.n.01 objects 39 +997 teapot teapot 1 40 7 tea pot otherprop Objects n04398044 teapot.n.01 objects 39 +1327 pen holder pen holder 1 40 7 otherprop Objects objects 39 +1328 tray rack tray rack 1 40 7 otherprop Objects objects 39 +1329 wig wig 1 40 7 otherprop Objects n04584207 wig.n.01 objects 39 +182 switch switch 1 40 7 otherprop Objects n04372370 switch.n.01 misc 40 +280 plastic containers plastic container 1 40 7 container otherprop Objects n03094503 container.n.01 objects 39 +1330 night light night light 1 40 7 otherprop Objects lighting 28 +1331 notepad notepad 1 40 7 otherprop Objects objects 39 +1332 mail bin mail 
bin 1 40 7 otherprop Objects misc 40 +1333 elevator button elevator button 1 40 7 otherprop Objects misc 40 +939 gaming wheel gaming wheel 1 40 7 otherprop Objects objects 39 +1334 drum set drum set 1 40 7 otherprop Objects objects 39 +480 cosmetic bag cosmetic bag 1 37 7 bag bag Objects objects 39 +907 coffee mug coffee mug 1 40 7 vessel otherprop Objects cup or mug 3797390 n03063599 coffee_mug.n.01 objects 39 +1335 closet shelf closet shelf 1 15 6 shelves shelves Furniture bookshelf bookshelf 2871439 n02871439 bookshelf.n.01 shelving 31 +1336 baby mobile baby mobile 1 40 7 otherprop Objects objects 39 +829 diaper bin diaper bin 1 40 7 bin otherprop Objects objects 39 +947 door wall door wall 1 1 12 wall wall Wall wall 1 +1116 stepstool stepstool 1 40 7 step stool otherprop Objects objects 39 +599 paper shredder shredder 1 40 7 otherprop Objects n04210120 shredder.n.01 objects 39 +733 dress rack dress rack 1 40 7 otherprop Objects n03238762 dress_rack.n.01 misc 40 +123 cover cover 1 40 7 blanket otherprop Objects objects 39 +506 shopping bag shopping bag 1 37 7 bag bag Objects n04204081 shopping_bag.n.01 objects 39 +569 sliding door sliding door 1 8 12 door door Wall door n04239074 sliding_door.n.01 door 4 +1337 exercise bike exercise bike 1 40 7 machine otherprop Objects n04210120 shredder.n.01 gym_equipment 33 +1338 recliner chair recliner chair 1 5 4 chair chair Chair chair chair chair 3001627 n03238762 dress_rack.n.01 chair 3 +1314 kitchenaid mixer kitchen mixer 1 40 7 otherprop Objects appliances 37 +1339 soda can soda can 1 40 7 can otherprop Objects can 2946921 n02946921 can.n.01 objects 39 +1340 stovetop stovetop 1 38 7 stove otherstructure Objects stove 4330267 n04330267 stove.n.02 appliances 37 +851 stepladder stepladder 1 39 6 ladder otherfurniture Furniture stairs n04315599 step_ladder.n.01 stairs 16 +142 tap tap 1 40 7 faucet otherprop Objects faucet 3325088 n04559451 water_faucet.n.01 objects 39 +436 cable cable 1 40 7 cables otherprop Objects objects 39 +1341 baby changing station baby changing station 1 39 6 otherfurniture Furniture furniture 36 +1342 costume costume 1 21 7 clothes clothes Objects n02728440 apparel.n.01 clothes 38 +885 rocking chair rocking chair 1 5 4 chair chair Chair chair chair chair 3001627 n04099969 rocking_chair.n.01 chair 3 +693 binder binder 1 40 7 binder otherprop Objects objects 39 +815 media center media center 1 3 6 cabinet cabinet Furniture cabinet 2933112 n02933112 cabinet.n.01 cabinet 7 +401 towel rack towel rack 1 40 7 otherprop Objects n04459773 towel_rack.n.01 misc 40 +1343 medal medal 1 40 7 otherprop Objects objects 39 +1184 stack of folded chairs folded chair 1 5 4 chair chair Chair chair chair chair 3001627 n03001627 chair.n.01 chair 3 +1344 telescope telescope 1 40 7 otherprop Objects n04403638 telescope.n.01 objects 39 +1345 closet doorframe closet doorframe 1 8 12 door door Wall door door 4 +160 glass glass 1 38 7 glass otherstructure Objects n03438257 glass.n.02 misc 40 +1126 baseball cap baseball cap 1 40 7 otherprop Objects cap 2954340 n02799323 baseball_cap.n.01 clothes 38 +1346 battery disposal jar battery disposal jar 1 40 7 jar otherprop Objects jar 3593526 n03593526 jar.n.01 objects 39 +332 mop mop 1 40 7 otherprop Objects n04367480 swab.n.02 objects 39 +397 tank tank 1 40 7 otherprop Objects objects 39 +643 mail tray mail tray 1 40 7 mail tray otherprop Objects objects 39 +551 centerpiece centerpiece 1 40 7 centerpiece otherprop Objects n02994419 centerpiece.n.02 objects 39 +1163 stick stick 1 40 7 stick otherprop Objects 
objects 39 +1347 closet floor closet floor 1 2 5 floor floor Floor n03365592 floor.n.01 floor 2 +1348 dryer sheets dryer sheets 1 40 7 otherprop Objects objects 39 +803 bycicle bycicle 1 40 7 otherprop Objects misc 40 +484 flower stand flower stand 1 39 6 stand otherfurniture Furniture furniture 36 +1349 air mattress air mattress 1 4 1 bed bed Bed bed bed bed 2818832 n02690809 air_mattress.n.01 bed 11 +1350 clip clip 1 40 7 otherprop Objects objects 39 +222 side table side table 1 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +1253 pizza boxes pizza box 1 29 7 box box Objects n02883344 box.n.01 objects 39 +1351 display display 1 39 7 otherfurniture Furniture n03211117 display.n.06 misc 40 +1352 postcard postcard 1 40 7 otherprop Objects objects 39 +828 display sign display sign 1 40 7 sign otherprop Objects misc 40 +1353 paper towel paper towel 1 40 7 paper towel otherprop Objects n03887697 paper_towel.n.01 towel 20 +612 boots boot 1 40 7 shoe otherprop Objects n04199027 shoe.n.01 clothes 38 +1354 tennis racket bag tennis racket bag 1 40 7 otherprop Objects objects 39 +1355 air hockey table air hockey table 1 7 10 table table Table table table table 4379243 n04379243 table.n.02 table 5 +1301 socks sock 1 21 7 clothes clothes Objects n04254777 sock.n.01 clothes 38 +1356 food bag food bag 1 37 7 bag bag Objects objects 39 +1199 clothes hangers clothes hanger 1 40 7 otherprop Objects n03057920 coat_hanger.n.01 misc 40 +1357 starbucks cup starbucks cup 1 40 7 cup otherprop Objects cup cup or mug 3797390 n03797390 mug.n.04 objects 39 diff --git a/models/Scanrefer/data/scannet/meta_data/scannetv2.txt b/models/Scanrefer/data/scannet/meta_data/scannetv2.txt new file mode 100644 index 0000000..2c242ef --- /dev/null +++ b/models/Scanrefer/data/scannet/meta_data/scannetv2.txt @@ -0,0 +1,1613 @@ +scene0000_00 +scene0000_01 +scene0000_02 +scene0001_00 +scene0001_01 +scene0002_00 +scene0002_01 +scene0003_00 +scene0003_01 +scene0003_02 +scene0004_00 +scene0005_00 +scene0005_01 +scene0006_00 +scene0006_01 +scene0006_02 +scene0007_00 +scene0008_00 +scene0009_00 +scene0009_01 +scene0009_02 +scene0010_00 +scene0010_01 +scene0011_00 +scene0011_01 +scene0012_00 +scene0012_01 +scene0012_02 +scene0013_00 +scene0013_01 +scene0013_02 +scene0014_00 +scene0015_00 +scene0016_00 +scene0016_01 +scene0016_02 +scene0017_00 +scene0017_01 +scene0017_02 +scene0018_00 +scene0019_00 +scene0019_01 +scene0020_00 +scene0020_01 +scene0021_00 +scene0022_00 +scene0022_01 +scene0023_00 +scene0024_00 +scene0024_01 +scene0024_02 +scene0025_00 +scene0025_01 +scene0025_02 +scene0026_00 +scene0027_00 +scene0027_01 +scene0027_02 +scene0028_00 +scene0029_00 +scene0029_01 +scene0029_02 +scene0030_00 +scene0030_01 +scene0030_02 +scene0031_00 +scene0031_01 +scene0031_02 +scene0032_00 +scene0032_01 +scene0033_00 +scene0034_00 +scene0034_01 +scene0034_02 +scene0035_00 +scene0035_01 +scene0036_00 +scene0036_01 +scene0037_00 +scene0038_00 +scene0038_01 +scene0038_02 +scene0039_00 +scene0039_01 +scene0040_00 +scene0040_01 +scene0041_00 +scene0041_01 +scene0042_00 +scene0042_01 +scene0042_02 +scene0043_00 +scene0043_01 +scene0044_00 +scene0044_01 +scene0044_02 +scene0045_00 +scene0045_01 +scene0046_00 +scene0046_01 +scene0046_02 +scene0047_00 +scene0048_00 +scene0048_01 +scene0049_00 +scene0050_00 +scene0050_01 +scene0050_02 +scene0051_00 +scene0051_01 +scene0051_02 +scene0051_03 +scene0052_00 +scene0052_01 +scene0052_02 +scene0053_00 +scene0054_00 +scene0055_00 +scene0055_01 +scene0055_02 +scene0056_00 +scene0056_01 
+scene0057_00 +scene0057_01 +scene0058_00 +scene0058_01 +scene0059_00 +scene0059_01 +scene0059_02 +scene0060_00 +scene0060_01 +scene0061_00 +scene0061_01 +scene0062_00 +scene0062_01 +scene0062_02 +scene0063_00 +scene0064_00 +scene0064_01 +scene0065_00 +scene0065_01 +scene0065_02 +scene0066_00 +scene0067_00 +scene0067_01 +scene0067_02 +scene0068_00 +scene0068_01 +scene0069_00 +scene0070_00 +scene0071_00 +scene0072_00 +scene0072_01 +scene0072_02 +scene0073_00 +scene0073_01 +scene0073_02 +scene0073_03 +scene0074_00 +scene0074_01 +scene0074_02 +scene0075_00 +scene0076_00 +scene0077_00 +scene0077_01 +scene0078_00 +scene0078_01 +scene0078_02 +scene0079_00 +scene0079_01 +scene0080_00 +scene0080_01 +scene0080_02 +scene0081_00 +scene0081_01 +scene0081_02 +scene0082_00 +scene0083_00 +scene0083_01 +scene0084_00 +scene0084_01 +scene0084_02 +scene0085_00 +scene0085_01 +scene0086_00 +scene0086_01 +scene0086_02 +scene0087_00 +scene0087_01 +scene0087_02 +scene0088_00 +scene0088_01 +scene0088_02 +scene0088_03 +scene0089_00 +scene0089_01 +scene0089_02 +scene0090_00 +scene0091_00 +scene0092_00 +scene0092_01 +scene0092_02 +scene0092_03 +scene0092_04 +scene0093_00 +scene0093_01 +scene0093_02 +scene0094_00 +scene0095_00 +scene0095_01 +scene0096_00 +scene0096_01 +scene0096_02 +scene0097_00 +scene0098_00 +scene0098_01 +scene0099_00 +scene0099_01 +scene0100_00 +scene0100_01 +scene0100_02 +scene0101_00 +scene0101_01 +scene0101_02 +scene0101_03 +scene0101_04 +scene0101_05 +scene0102_00 +scene0102_01 +scene0103_00 +scene0103_01 +scene0104_00 +scene0105_00 +scene0105_01 +scene0105_02 +scene0106_00 +scene0106_01 +scene0106_02 +scene0107_00 +scene0108_00 +scene0109_00 +scene0109_01 +scene0110_00 +scene0110_01 +scene0110_02 +scene0111_00 +scene0111_01 +scene0111_02 +scene0112_00 +scene0112_01 +scene0112_02 +scene0113_00 +scene0113_01 +scene0114_00 +scene0114_01 +scene0114_02 +scene0115_00 +scene0115_01 +scene0115_02 +scene0116_00 +scene0116_01 +scene0116_02 +scene0117_00 +scene0118_00 +scene0118_01 +scene0118_02 +scene0119_00 +scene0120_00 +scene0120_01 +scene0121_00 +scene0121_01 +scene0121_02 +scene0122_00 +scene0122_01 +scene0123_00 +scene0123_01 +scene0123_02 +scene0124_00 +scene0124_01 +scene0125_00 +scene0126_00 +scene0126_01 +scene0126_02 +scene0127_00 +scene0127_01 +scene0128_00 +scene0129_00 +scene0130_00 +scene0131_00 +scene0131_01 +scene0131_02 +scene0132_00 +scene0132_01 +scene0132_02 +scene0133_00 +scene0134_00 +scene0134_01 +scene0134_02 +scene0135_00 +scene0136_00 +scene0136_01 +scene0136_02 +scene0137_00 +scene0137_01 +scene0137_02 +scene0138_00 +scene0139_00 +scene0140_00 +scene0140_01 +scene0141_00 +scene0141_01 +scene0141_02 +scene0142_00 +scene0142_01 +scene0143_00 +scene0143_01 +scene0143_02 +scene0144_00 +scene0144_01 +scene0145_00 +scene0146_00 +scene0146_01 +scene0146_02 +scene0147_00 +scene0147_01 +scene0148_00 +scene0149_00 +scene0150_00 +scene0150_01 +scene0150_02 +scene0151_00 +scene0151_01 +scene0152_00 +scene0152_01 +scene0152_02 +scene0153_00 +scene0153_01 +scene0154_00 +scene0155_00 +scene0155_01 +scene0155_02 +scene0156_00 +scene0157_00 +scene0157_01 +scene0158_00 +scene0158_01 +scene0158_02 +scene0159_00 +scene0160_00 +scene0160_01 +scene0160_02 +scene0160_03 +scene0160_04 +scene0161_00 +scene0161_01 +scene0161_02 +scene0162_00 +scene0163_00 +scene0163_01 +scene0164_00 +scene0164_01 +scene0164_02 +scene0164_03 +scene0165_00 +scene0165_01 +scene0165_02 +scene0166_00 +scene0166_01 +scene0166_02 +scene0167_00 +scene0168_00 +scene0168_01 +scene0168_02 +scene0169_00 +scene0169_01 
+scene0170_00 +scene0170_01 +scene0170_02 +scene0171_00 +scene0171_01 +scene0172_00 +scene0172_01 +scene0173_00 +scene0173_01 +scene0173_02 +scene0174_00 +scene0174_01 +scene0175_00 +scene0176_00 +scene0177_00 +scene0177_01 +scene0177_02 +scene0178_00 +scene0179_00 +scene0180_00 +scene0181_00 +scene0181_01 +scene0181_02 +scene0181_03 +scene0182_00 +scene0182_01 +scene0182_02 +scene0183_00 +scene0184_00 +scene0185_00 +scene0186_00 +scene0186_01 +scene0187_00 +scene0187_01 +scene0188_00 +scene0189_00 +scene0190_00 +scene0191_00 +scene0191_01 +scene0191_02 +scene0192_00 +scene0192_01 +scene0192_02 +scene0193_00 +scene0193_01 +scene0194_00 +scene0195_00 +scene0195_01 +scene0195_02 +scene0196_00 +scene0197_00 +scene0197_01 +scene0197_02 +scene0198_00 +scene0199_00 +scene0200_00 +scene0200_01 +scene0200_02 +scene0201_00 +scene0201_01 +scene0201_02 +scene0202_00 +scene0203_00 +scene0203_01 +scene0203_02 +scene0204_00 +scene0204_01 +scene0204_02 +scene0205_00 +scene0205_01 +scene0205_02 +scene0206_00 +scene0206_01 +scene0206_02 +scene0207_00 +scene0207_01 +scene0207_02 +scene0208_00 +scene0209_00 +scene0209_01 +scene0209_02 +scene0210_00 +scene0210_01 +scene0211_00 +scene0211_01 +scene0211_02 +scene0211_03 +scene0212_00 +scene0212_01 +scene0212_02 +scene0213_00 +scene0214_00 +scene0214_01 +scene0214_02 +scene0215_00 +scene0215_01 +scene0216_00 +scene0217_00 +scene0218_00 +scene0218_01 +scene0219_00 +scene0220_00 +scene0220_01 +scene0220_02 +scene0221_00 +scene0221_01 +scene0222_00 +scene0222_01 +scene0223_00 +scene0223_01 +scene0223_02 +scene0224_00 +scene0225_00 +scene0226_00 +scene0226_01 +scene0227_00 +scene0228_00 +scene0229_00 +scene0229_01 +scene0229_02 +scene0230_00 +scene0231_00 +scene0231_01 +scene0231_02 +scene0232_00 +scene0232_01 +scene0232_02 +scene0233_00 +scene0233_01 +scene0234_00 +scene0235_00 +scene0236_00 +scene0236_01 +scene0237_00 +scene0237_01 +scene0238_00 +scene0238_01 +scene0239_00 +scene0239_01 +scene0239_02 +scene0240_00 +scene0241_00 +scene0241_01 +scene0241_02 +scene0242_00 +scene0242_01 +scene0242_02 +scene0243_00 +scene0244_00 +scene0244_01 +scene0245_00 +scene0246_00 +scene0247_00 +scene0247_01 +scene0248_00 +scene0248_01 +scene0248_02 +scene0249_00 +scene0250_00 +scene0250_01 +scene0250_02 +scene0251_00 +scene0252_00 +scene0253_00 +scene0254_00 +scene0254_01 +scene0255_00 +scene0255_01 +scene0255_02 +scene0256_00 +scene0256_01 +scene0256_02 +scene0257_00 +scene0258_00 +scene0259_00 +scene0259_01 +scene0260_00 +scene0260_01 +scene0260_02 +scene0261_00 +scene0261_01 +scene0261_02 +scene0261_03 +scene0262_00 +scene0262_01 +scene0263_00 +scene0263_01 +scene0264_00 +scene0264_01 +scene0264_02 +scene0265_00 +scene0265_01 +scene0265_02 +scene0266_00 +scene0266_01 +scene0267_00 +scene0268_00 +scene0268_01 +scene0268_02 +scene0269_00 +scene0269_01 +scene0269_02 +scene0270_00 +scene0270_01 +scene0270_02 +scene0271_00 +scene0271_01 +scene0272_00 +scene0272_01 +scene0273_00 +scene0273_01 +scene0274_00 +scene0274_01 +scene0274_02 +scene0275_00 +scene0276_00 +scene0276_01 +scene0277_00 +scene0277_01 +scene0277_02 +scene0278_00 +scene0278_01 +scene0279_00 +scene0279_01 +scene0279_02 +scene0280_00 +scene0280_01 +scene0280_02 +scene0281_00 +scene0282_00 +scene0282_01 +scene0282_02 +scene0283_00 +scene0284_00 +scene0285_00 +scene0286_00 +scene0286_01 +scene0286_02 +scene0286_03 +scene0287_00 +scene0288_00 +scene0288_01 +scene0288_02 +scene0289_00 +scene0289_01 +scene0290_00 +scene0291_00 +scene0291_01 +scene0291_02 +scene0292_00 +scene0292_01 +scene0293_00 +scene0293_01 
+scene0294_00 +scene0294_01 +scene0294_02 +scene0295_00 +scene0295_01 +scene0296_00 +scene0296_01 +scene0297_00 +scene0297_01 +scene0297_02 +scene0298_00 +scene0299_00 +scene0299_01 +scene0300_00 +scene0300_01 +scene0301_00 +scene0301_01 +scene0301_02 +scene0302_00 +scene0302_01 +scene0303_00 +scene0303_01 +scene0303_02 +scene0304_00 +scene0305_00 +scene0305_01 +scene0306_00 +scene0306_01 +scene0307_00 +scene0307_01 +scene0307_02 +scene0308_00 +scene0309_00 +scene0309_01 +scene0310_00 +scene0310_01 +scene0310_02 +scene0311_00 +scene0312_00 +scene0312_01 +scene0312_02 +scene0313_00 +scene0313_01 +scene0313_02 +scene0314_00 +scene0315_00 +scene0316_00 +scene0317_00 +scene0317_01 +scene0318_00 +scene0319_00 +scene0320_00 +scene0320_01 +scene0320_02 +scene0320_03 +scene0321_00 +scene0322_00 +scene0323_00 +scene0323_01 +scene0324_00 +scene0324_01 +scene0325_00 +scene0325_01 +scene0326_00 +scene0327_00 +scene0328_00 +scene0329_00 +scene0329_01 +scene0329_02 +scene0330_00 +scene0331_00 +scene0331_01 +scene0332_00 +scene0332_01 +scene0332_02 +scene0333_00 +scene0334_00 +scene0334_01 +scene0334_02 +scene0335_00 +scene0335_01 +scene0335_02 +scene0336_00 +scene0336_01 +scene0337_00 +scene0337_01 +scene0337_02 +scene0338_00 +scene0338_01 +scene0338_02 +scene0339_00 +scene0340_00 +scene0340_01 +scene0340_02 +scene0341_00 +scene0341_01 +scene0342_00 +scene0343_00 +scene0344_00 +scene0344_01 +scene0345_00 +scene0345_01 +scene0346_00 +scene0346_01 +scene0347_00 +scene0347_01 +scene0347_02 +scene0348_00 +scene0348_01 +scene0348_02 +scene0349_00 +scene0349_01 +scene0350_00 +scene0350_01 +scene0350_02 +scene0351_00 +scene0351_01 +scene0352_00 +scene0352_01 +scene0352_02 +scene0353_00 +scene0353_01 +scene0353_02 +scene0354_00 +scene0355_00 +scene0355_01 +scene0356_00 +scene0356_01 +scene0356_02 +scene0357_00 +scene0357_01 +scene0358_00 +scene0358_01 +scene0358_02 +scene0359_00 +scene0359_01 +scene0360_00 +scene0361_00 +scene0361_01 +scene0361_02 +scene0362_00 +scene0362_01 +scene0362_02 +scene0362_03 +scene0363_00 +scene0364_00 +scene0364_01 +scene0365_00 +scene0365_01 +scene0365_02 +scene0366_00 +scene0367_00 +scene0367_01 +scene0368_00 +scene0368_01 +scene0369_00 +scene0369_01 +scene0369_02 +scene0370_00 +scene0370_01 +scene0370_02 +scene0371_00 +scene0371_01 +scene0372_00 +scene0373_00 +scene0373_01 +scene0374_00 +scene0375_00 +scene0375_01 +scene0375_02 +scene0376_00 +scene0376_01 +scene0376_02 +scene0377_00 +scene0377_01 +scene0377_02 +scene0378_00 +scene0378_01 +scene0378_02 +scene0379_00 +scene0380_00 +scene0380_01 +scene0380_02 +scene0381_00 +scene0381_01 +scene0381_02 +scene0382_00 +scene0382_01 +scene0383_00 +scene0383_01 +scene0383_02 +scene0384_00 +scene0385_00 +scene0385_01 +scene0385_02 +scene0386_00 +scene0387_00 +scene0387_01 +scene0387_02 +scene0388_00 +scene0388_01 +scene0389_00 +scene0390_00 +scene0391_00 +scene0392_00 +scene0392_01 +scene0392_02 +scene0393_00 +scene0393_01 +scene0393_02 +scene0394_00 +scene0394_01 +scene0395_00 +scene0395_01 +scene0395_02 +scene0396_00 +scene0396_01 +scene0396_02 +scene0397_00 +scene0397_01 +scene0398_00 +scene0398_01 +scene0399_00 +scene0399_01 +scene0400_00 +scene0400_01 +scene0401_00 +scene0402_00 +scene0403_00 +scene0403_01 +scene0404_00 +scene0404_01 +scene0404_02 +scene0405_00 +scene0406_00 +scene0406_01 +scene0406_02 +scene0407_00 +scene0407_01 +scene0408_00 +scene0408_01 +scene0409_00 +scene0409_01 +scene0410_00 +scene0410_01 +scene0411_00 +scene0411_01 +scene0411_02 +scene0412_00 +scene0412_01 +scene0413_00 +scene0414_00 +scene0415_00 
+scene0415_01 +scene0415_02 +scene0416_00 +scene0416_01 +scene0416_02 +scene0416_03 +scene0416_04 +scene0417_00 +scene0418_00 +scene0418_01 +scene0418_02 +scene0419_00 +scene0419_01 +scene0419_02 +scene0420_00 +scene0420_01 +scene0420_02 +scene0421_00 +scene0421_01 +scene0421_02 +scene0422_00 +scene0423_00 +scene0423_01 +scene0423_02 +scene0424_00 +scene0424_01 +scene0424_02 +scene0425_00 +scene0425_01 +scene0426_00 +scene0426_01 +scene0426_02 +scene0426_03 +scene0427_00 +scene0428_00 +scene0428_01 +scene0429_00 +scene0430_00 +scene0430_01 +scene0431_00 +scene0432_00 +scene0432_01 +scene0433_00 +scene0434_00 +scene0434_01 +scene0434_02 +scene0435_00 +scene0435_01 +scene0435_02 +scene0435_03 +scene0436_00 +scene0437_00 +scene0437_01 +scene0438_00 +scene0439_00 +scene0439_01 +scene0440_00 +scene0440_01 +scene0440_02 +scene0441_00 +scene0442_00 +scene0443_00 +scene0444_00 +scene0444_01 +scene0445_00 +scene0445_01 +scene0446_00 +scene0446_01 +scene0447_00 +scene0447_01 +scene0447_02 +scene0448_00 +scene0448_01 +scene0448_02 +scene0449_00 +scene0449_01 +scene0449_02 +scene0450_00 +scene0451_00 +scene0451_01 +scene0451_02 +scene0451_03 +scene0451_04 +scene0451_05 +scene0452_00 +scene0452_01 +scene0452_02 +scene0453_00 +scene0453_01 +scene0454_00 +scene0455_00 +scene0456_00 +scene0456_01 +scene0457_00 +scene0457_01 +scene0457_02 +scene0458_00 +scene0458_01 +scene0459_00 +scene0459_01 +scene0460_00 +scene0461_00 +scene0462_00 +scene0463_00 +scene0463_01 +scene0464_00 +scene0465_00 +scene0465_01 +scene0466_00 +scene0466_01 +scene0467_00 +scene0468_00 +scene0468_01 +scene0468_02 +scene0469_00 +scene0469_01 +scene0469_02 +scene0470_00 +scene0470_01 +scene0471_00 +scene0471_01 +scene0471_02 +scene0472_00 +scene0472_01 +scene0472_02 +scene0473_00 +scene0473_01 +scene0474_00 +scene0474_01 +scene0474_02 +scene0474_03 +scene0474_04 +scene0474_05 +scene0475_00 +scene0475_01 +scene0475_02 +scene0476_00 +scene0476_01 +scene0476_02 +scene0477_00 +scene0477_01 +scene0478_00 +scene0478_01 +scene0479_00 +scene0479_01 +scene0479_02 +scene0480_00 +scene0480_01 +scene0481_00 +scene0481_01 +scene0482_00 +scene0482_01 +scene0483_00 +scene0484_00 +scene0484_01 +scene0485_00 +scene0486_00 +scene0487_00 +scene0487_01 +scene0488_00 +scene0488_01 +scene0489_00 +scene0489_01 +scene0489_02 +scene0490_00 +scene0491_00 +scene0492_00 +scene0492_01 +scene0493_00 +scene0493_01 +scene0494_00 +scene0495_00 +scene0496_00 +scene0497_00 +scene0498_00 +scene0498_01 +scene0498_02 +scene0499_00 +scene0500_00 +scene0500_01 +scene0501_00 +scene0501_01 +scene0501_02 +scene0502_00 +scene0502_01 +scene0502_02 +scene0503_00 +scene0504_00 +scene0505_00 +scene0505_01 +scene0505_02 +scene0505_03 +scene0505_04 +scene0506_00 +scene0507_00 +scene0508_00 +scene0508_01 +scene0508_02 +scene0509_00 +scene0509_01 +scene0509_02 +scene0510_00 +scene0510_01 +scene0510_02 +scene0511_00 +scene0511_01 +scene0512_00 +scene0513_00 +scene0514_00 +scene0514_01 +scene0515_00 +scene0515_01 +scene0515_02 +scene0516_00 +scene0516_01 +scene0517_00 +scene0517_01 +scene0517_02 +scene0518_00 +scene0519_00 +scene0520_00 +scene0520_01 +scene0521_00 +scene0522_00 +scene0523_00 +scene0523_01 +scene0523_02 +scene0524_00 +scene0524_01 +scene0525_00 +scene0525_01 +scene0525_02 +scene0526_00 +scene0526_01 +scene0527_00 +scene0528_00 +scene0528_01 +scene0529_00 +scene0529_01 +scene0529_02 +scene0530_00 +scene0531_00 +scene0532_00 +scene0532_01 +scene0533_00 +scene0533_01 +scene0534_00 +scene0534_01 +scene0535_00 +scene0536_00 +scene0536_01 +scene0536_02 +scene0537_00 
+scene0538_00 +scene0539_00 +scene0539_01 +scene0539_02 +scene0540_00 +scene0540_01 +scene0540_02 +scene0541_00 +scene0541_01 +scene0541_02 +scene0542_00 +scene0543_00 +scene0543_01 +scene0543_02 +scene0544_00 +scene0545_00 +scene0545_01 +scene0545_02 +scene0546_00 +scene0547_00 +scene0547_01 +scene0547_02 +scene0548_00 +scene0548_01 +scene0548_02 +scene0549_00 +scene0549_01 +scene0550_00 +scene0551_00 +scene0552_00 +scene0552_01 +scene0553_00 +scene0553_01 +scene0553_02 +scene0554_00 +scene0554_01 +scene0555_00 +scene0556_00 +scene0556_01 +scene0557_00 +scene0557_01 +scene0557_02 +scene0558_00 +scene0558_01 +scene0558_02 +scene0559_00 +scene0559_01 +scene0559_02 +scene0560_00 +scene0561_00 +scene0561_01 +scene0562_00 +scene0563_00 +scene0564_00 +scene0565_00 +scene0566_00 +scene0567_00 +scene0567_01 +scene0568_00 +scene0568_01 +scene0568_02 +scene0569_00 +scene0569_01 +scene0570_00 +scene0570_01 +scene0570_02 +scene0571_00 +scene0571_01 +scene0572_00 +scene0572_01 +scene0572_02 +scene0573_00 +scene0573_01 +scene0574_00 +scene0574_01 +scene0574_02 +scene0575_00 +scene0575_01 +scene0575_02 +scene0576_00 +scene0576_01 +scene0576_02 +scene0577_00 +scene0578_00 +scene0578_01 +scene0578_02 +scene0579_00 +scene0579_01 +scene0579_02 +scene0580_00 +scene0580_01 +scene0581_00 +scene0581_01 +scene0581_02 +scene0582_00 +scene0582_01 +scene0582_02 +scene0583_00 +scene0583_01 +scene0583_02 +scene0584_00 +scene0584_01 +scene0584_02 +scene0585_00 +scene0585_01 +scene0586_00 +scene0586_01 +scene0586_02 +scene0587_00 +scene0587_01 +scene0587_02 +scene0587_03 +scene0588_00 +scene0588_01 +scene0588_02 +scene0588_03 +scene0589_00 +scene0589_01 +scene0589_02 +scene0590_00 +scene0590_01 +scene0591_00 +scene0591_01 +scene0591_02 +scene0592_00 +scene0592_01 +scene0593_00 +scene0593_01 +scene0594_00 +scene0595_00 +scene0596_00 +scene0596_01 +scene0596_02 +scene0597_00 +scene0597_01 +scene0597_02 +scene0598_00 +scene0598_01 +scene0598_02 +scene0599_00 +scene0599_01 +scene0599_02 +scene0600_00 +scene0600_01 +scene0600_02 +scene0601_00 +scene0601_01 +scene0602_00 +scene0603_00 +scene0603_01 +scene0604_00 +scene0604_01 +scene0604_02 +scene0605_00 +scene0605_01 +scene0606_00 +scene0606_01 +scene0606_02 +scene0607_00 +scene0607_01 +scene0608_00 +scene0608_01 +scene0608_02 +scene0609_00 +scene0609_01 +scene0609_02 +scene0609_03 +scene0610_00 +scene0610_01 +scene0610_02 +scene0611_00 +scene0611_01 +scene0612_00 +scene0612_01 +scene0613_00 +scene0613_01 +scene0613_02 +scene0614_00 +scene0614_01 +scene0614_02 +scene0615_00 +scene0615_01 +scene0616_00 +scene0616_01 +scene0617_00 +scene0618_00 +scene0619_00 +scene0620_00 +scene0620_01 +scene0621_00 +scene0622_00 +scene0622_01 +scene0623_00 +scene0623_01 +scene0624_00 +scene0625_00 +scene0625_01 +scene0626_00 +scene0626_01 +scene0626_02 +scene0627_00 +scene0627_01 +scene0628_00 +scene0628_01 +scene0628_02 +scene0629_00 +scene0629_01 +scene0629_02 +scene0630_00 +scene0630_01 +scene0630_02 +scene0630_03 +scene0630_04 +scene0630_05 +scene0630_06 +scene0631_00 +scene0631_01 +scene0631_02 +scene0632_00 +scene0633_00 +scene0633_01 +scene0634_00 +scene0635_00 +scene0635_01 +scene0636_00 +scene0637_00 +scene0638_00 +scene0639_00 +scene0640_00 +scene0640_01 +scene0640_02 +scene0641_00 +scene0642_00 +scene0642_01 +scene0642_02 +scene0642_03 +scene0643_00 +scene0644_00 +scene0645_00 +scene0645_01 +scene0645_02 +scene0646_00 +scene0646_01 +scene0646_02 +scene0647_00 +scene0647_01 +scene0648_00 +scene0648_01 +scene0649_00 +scene0649_01 +scene0650_00 +scene0651_00 +scene0651_01 
+scene0651_02 +scene0652_00 +scene0653_00 +scene0653_01 +scene0654_00 +scene0654_01 +scene0655_00 +scene0655_01 +scene0655_02 +scene0656_00 +scene0656_01 +scene0656_02 +scene0656_03 +scene0657_00 +scene0658_00 +scene0659_00 +scene0659_01 +scene0660_00 +scene0661_00 +scene0662_00 +scene0662_01 +scene0662_02 +scene0663_00 +scene0663_01 +scene0663_02 +scene0664_00 +scene0664_01 +scene0664_02 +scene0665_00 +scene0665_01 +scene0666_00 +scene0666_01 +scene0666_02 +scene0667_00 +scene0667_01 +scene0667_02 +scene0668_00 +scene0669_00 +scene0669_01 +scene0670_00 +scene0670_01 +scene0671_00 +scene0671_01 +scene0672_00 +scene0672_01 +scene0673_00 +scene0673_01 +scene0673_02 +scene0673_03 +scene0673_04 +scene0673_05 +scene0674_00 +scene0674_01 +scene0675_00 +scene0675_01 +scene0676_00 +scene0676_01 +scene0677_00 +scene0677_01 +scene0677_02 +scene0678_00 +scene0678_01 +scene0678_02 +scene0679_00 +scene0679_01 +scene0680_00 +scene0680_01 +scene0681_00 +scene0682_00 +scene0683_00 +scene0684_00 +scene0684_01 +scene0685_00 +scene0685_01 +scene0685_02 +scene0686_00 +scene0686_01 +scene0686_02 +scene0687_00 +scene0688_00 +scene0689_00 +scene0690_00 +scene0690_01 +scene0691_00 +scene0691_01 +scene0692_00 +scene0692_01 +scene0692_02 +scene0692_03 +scene0692_04 +scene0693_00 +scene0693_01 +scene0693_02 +scene0694_00 +scene0694_01 +scene0695_00 +scene0695_01 +scene0695_02 +scene0695_03 +scene0696_00 +scene0696_01 +scene0696_02 +scene0697_00 +scene0697_01 +scene0697_02 +scene0697_03 +scene0698_00 +scene0698_01 +scene0699_00 +scene0700_00 +scene0700_01 +scene0700_02 +scene0701_00 +scene0701_01 +scene0701_02 +scene0702_00 +scene0702_01 +scene0702_02 +scene0703_00 +scene0703_01 +scene0704_00 +scene0704_01 +scene0705_00 +scene0705_01 +scene0705_02 +scene0706_00 +scene0707_00 +scene0708_00 +scene0709_00 +scene0710_00 +scene0711_00 +scene0712_00 +scene0713_00 +scene0714_00 +scene0715_00 +scene0716_00 +scene0717_00 +scene0718_00 +scene0719_00 +scene0720_00 +scene0721_00 +scene0722_00 +scene0723_00 +scene0724_00 +scene0725_00 +scene0726_00 +scene0727_00 +scene0728_00 +scene0729_00 +scene0730_00 +scene0731_00 +scene0732_00 +scene0733_00 +scene0734_00 +scene0735_00 +scene0736_00 +scene0737_00 +scene0738_00 +scene0739_00 +scene0740_00 +scene0741_00 +scene0742_00 +scene0743_00 +scene0744_00 +scene0745_00 +scene0746_00 +scene0747_00 +scene0748_00 +scene0749_00 +scene0750_00 +scene0751_00 +scene0752_00 +scene0753_00 +scene0754_00 +scene0755_00 +scene0756_00 +scene0757_00 +scene0758_00 +scene0759_00 +scene0760_00 +scene0761_00 +scene0762_00 +scene0763_00 +scene0764_00 +scene0765_00 +scene0766_00 +scene0767_00 +scene0768_00 +scene0769_00 +scene0770_00 +scene0771_00 +scene0772_00 +scene0773_00 +scene0774_00 +scene0775_00 +scene0776_00 +scene0777_00 +scene0778_00 +scene0779_00 +scene0780_00 +scene0781_00 +scene0782_00 +scene0783_00 +scene0784_00 +scene0785_00 +scene0786_00 +scene0787_00 +scene0788_00 +scene0789_00 +scene0790_00 +scene0791_00 +scene0792_00 +scene0793_00 +scene0794_00 +scene0795_00 +scene0796_00 +scene0797_00 +scene0798_00 +scene0799_00 +scene0800_00 +scene0801_00 +scene0802_00 +scene0803_00 +scene0804_00 +scene0805_00 +scene0806_00 diff --git a/models/Scanrefer/data/scannet/meta_data/scannetv2_test.txt b/models/Scanrefer/data/scannet/meta_data/scannetv2_test.txt new file mode 100644 index 0000000..79d15b0 --- /dev/null +++ b/models/Scanrefer/data/scannet/meta_data/scannetv2_test.txt @@ -0,0 +1,100 @@ +scene0707_00 +scene0708_00 +scene0709_00 +scene0710_00 +scene0711_00 +scene0712_00 +scene0713_00 +scene0714_00 
+scene0715_00 +scene0716_00 +scene0717_00 +scene0718_00 +scene0719_00 +scene0720_00 +scene0721_00 +scene0722_00 +scene0723_00 +scene0724_00 +scene0725_00 +scene0726_00 +scene0727_00 +scene0728_00 +scene0729_00 +scene0730_00 +scene0731_00 +scene0732_00 +scene0733_00 +scene0734_00 +scene0735_00 +scene0736_00 +scene0737_00 +scene0738_00 +scene0739_00 +scene0740_00 +scene0741_00 +scene0742_00 +scene0743_00 +scene0744_00 +scene0745_00 +scene0746_00 +scene0747_00 +scene0748_00 +scene0749_00 +scene0750_00 +scene0751_00 +scene0752_00 +scene0753_00 +scene0754_00 +scene0755_00 +scene0756_00 +scene0757_00 +scene0758_00 +scene0759_00 +scene0760_00 +scene0761_00 +scene0762_00 +scene0763_00 +scene0764_00 +scene0765_00 +scene0766_00 +scene0767_00 +scene0768_00 +scene0769_00 +scene0770_00 +scene0771_00 +scene0772_00 +scene0773_00 +scene0774_00 +scene0775_00 +scene0776_00 +scene0777_00 +scene0778_00 +scene0779_00 +scene0780_00 +scene0781_00 +scene0782_00 +scene0783_00 +scene0784_00 +scene0785_00 +scene0786_00 +scene0787_00 +scene0788_00 +scene0789_00 +scene0790_00 +scene0791_00 +scene0792_00 +scene0793_00 +scene0794_00 +scene0795_00 +scene0796_00 +scene0797_00 +scene0798_00 +scene0799_00 +scene0800_00 +scene0801_00 +scene0802_00 +scene0803_00 +scene0804_00 +scene0805_00 +scene0806_00 diff --git a/models/Scanrefer/data/scannet/meta_data/scannetv2_train.txt b/models/Scanrefer/data/scannet/meta_data/scannetv2_train.txt new file mode 100644 index 0000000..8c75dc7 --- /dev/null +++ b/models/Scanrefer/data/scannet/meta_data/scannetv2_train.txt @@ -0,0 +1,1201 @@ +scene0000_00 +scene0000_01 +scene0000_02 +scene0001_00 +scene0001_01 +scene0002_00 +scene0002_01 +scene0003_00 +scene0003_01 +scene0003_02 +scene0004_00 +scene0005_00 +scene0005_01 +scene0006_00 +scene0006_01 +scene0006_02 +scene0007_00 +scene0008_00 +scene0009_00 +scene0009_01 +scene0009_02 +scene0010_00 +scene0010_01 +scene0012_00 +scene0012_01 +scene0012_02 +scene0013_00 +scene0013_01 +scene0013_02 +scene0014_00 +scene0016_00 +scene0016_01 +scene0016_02 +scene0017_00 +scene0017_01 +scene0017_02 +scene0018_00 +scene0020_00 +scene0020_01 +scene0021_00 +scene0022_00 +scene0022_01 +scene0023_00 +scene0024_00 +scene0024_01 +scene0024_02 +scene0026_00 +scene0027_00 +scene0027_01 +scene0027_02 +scene0028_00 +scene0029_00 +scene0029_01 +scene0029_02 +scene0031_00 +scene0031_01 +scene0031_02 +scene0032_00 +scene0032_01 +scene0033_00 +scene0034_00 +scene0034_01 +scene0034_02 +scene0035_00 +scene0035_01 +scene0036_00 +scene0036_01 +scene0037_00 +scene0038_00 +scene0038_01 +scene0038_02 +scene0039_00 +scene0039_01 +scene0040_00 +scene0040_01 +scene0041_00 +scene0041_01 +scene0042_00 +scene0042_01 +scene0042_02 +scene0043_00 +scene0043_01 +scene0044_00 +scene0044_01 +scene0044_02 +scene0045_00 +scene0045_01 +scene0047_00 +scene0048_00 +scene0048_01 +scene0049_00 +scene0051_00 +scene0051_01 +scene0051_02 +scene0051_03 +scene0052_00 +scene0052_01 +scene0052_02 +scene0053_00 +scene0054_00 +scene0055_00 +scene0055_01 +scene0055_02 +scene0056_00 +scene0056_01 +scene0057_00 +scene0057_01 +scene0058_00 +scene0058_01 +scene0059_00 +scene0059_01 +scene0059_02 +scene0060_00 +scene0060_01 +scene0061_00 +scene0061_01 +scene0062_00 +scene0062_01 +scene0062_02 +scene0065_00 +scene0065_01 +scene0065_02 +scene0066_00 +scene0067_00 +scene0067_01 +scene0067_02 +scene0068_00 +scene0068_01 +scene0069_00 +scene0070_00 +scene0071_00 +scene0072_00 +scene0072_01 +scene0072_02 +scene0073_00 +scene0073_01 +scene0073_02 +scene0073_03 +scene0074_00 +scene0074_01 +scene0074_02 +scene0075_00 
+scene0076_00 +scene0078_00 +scene0078_01 +scene0078_02 +scene0079_00 +scene0079_01 +scene0080_00 +scene0080_01 +scene0080_02 +scene0082_00 +scene0083_00 +scene0083_01 +scene0085_00 +scene0085_01 +scene0087_00 +scene0087_01 +scene0087_02 +scene0089_00 +scene0089_01 +scene0089_02 +scene0090_00 +scene0091_00 +scene0092_00 +scene0092_01 +scene0092_02 +scene0092_03 +scene0092_04 +scene0093_00 +scene0093_01 +scene0093_02 +scene0094_00 +scene0096_00 +scene0096_01 +scene0096_02 +scene0097_00 +scene0098_00 +scene0098_01 +scene0099_00 +scene0099_01 +scene0101_00 +scene0101_01 +scene0101_02 +scene0101_03 +scene0101_04 +scene0101_05 +scene0102_00 +scene0102_01 +scene0103_00 +scene0103_01 +scene0104_00 +scene0105_00 +scene0105_01 +scene0105_02 +scene0106_00 +scene0106_01 +scene0106_02 +scene0107_00 +scene0108_00 +scene0109_00 +scene0109_01 +scene0110_00 +scene0110_01 +scene0110_02 +scene0111_00 +scene0111_01 +scene0111_02 +scene0112_00 +scene0112_01 +scene0112_02 +scene0113_00 +scene0113_01 +scene0114_00 +scene0114_01 +scene0114_02 +scene0115_00 +scene0115_01 +scene0115_02 +scene0116_00 +scene0116_01 +scene0116_02 +scene0117_00 +scene0118_00 +scene0118_01 +scene0118_02 +scene0119_00 +scene0120_00 +scene0120_01 +scene0121_00 +scene0121_01 +scene0121_02 +scene0122_00 +scene0122_01 +scene0123_00 +scene0123_01 +scene0123_02 +scene0124_00 +scene0124_01 +scene0125_00 +scene0126_00 +scene0126_01 +scene0126_02 +scene0127_00 +scene0127_01 +scene0128_00 +scene0129_00 +scene0130_00 +scene0132_00 +scene0132_01 +scene0132_02 +scene0133_00 +scene0134_00 +scene0134_01 +scene0134_02 +scene0135_00 +scene0136_00 +scene0136_01 +scene0136_02 +scene0137_00 +scene0137_01 +scene0137_02 +scene0138_00 +scene0140_00 +scene0140_01 +scene0141_00 +scene0141_01 +scene0141_02 +scene0142_00 +scene0142_01 +scene0143_00 +scene0143_01 +scene0143_02 +scene0145_00 +scene0147_00 +scene0147_01 +scene0148_00 +scene0150_00 +scene0150_01 +scene0150_02 +scene0151_00 +scene0151_01 +scene0152_00 +scene0152_01 +scene0152_02 +scene0154_00 +scene0155_00 +scene0155_01 +scene0155_02 +scene0156_00 +scene0157_00 +scene0157_01 +scene0158_00 +scene0158_01 +scene0158_02 +scene0159_00 +scene0160_00 +scene0160_01 +scene0160_02 +scene0160_03 +scene0160_04 +scene0161_00 +scene0161_01 +scene0161_02 +scene0162_00 +scene0163_00 +scene0163_01 +scene0165_00 +scene0165_01 +scene0165_02 +scene0166_00 +scene0166_01 +scene0166_02 +scene0167_00 +scene0168_00 +scene0168_01 +scene0168_02 +scene0170_00 +scene0170_01 +scene0170_02 +scene0171_00 +scene0171_01 +scene0172_00 +scene0172_01 +scene0173_00 +scene0173_01 +scene0173_02 +scene0174_00 +scene0174_01 +scene0175_00 +scene0176_00 +scene0177_00 +scene0177_01 +scene0177_02 +scene0178_00 +scene0179_00 +scene0180_00 +scene0181_00 +scene0181_01 +scene0181_02 +scene0181_03 +scene0182_00 +scene0182_01 +scene0182_02 +scene0183_00 +scene0184_00 +scene0185_00 +scene0186_00 +scene0186_01 +scene0188_00 +scene0189_00 +scene0190_00 +scene0191_00 +scene0191_01 +scene0191_02 +scene0192_00 +scene0192_01 +scene0192_02 +scene0194_00 +scene0195_00 +scene0195_01 +scene0195_02 +scene0197_00 +scene0197_01 +scene0197_02 +scene0198_00 +scene0199_00 +scene0200_00 +scene0200_01 +scene0200_02 +scene0201_00 +scene0201_01 +scene0201_02 +scene0202_00 +scene0204_00 +scene0204_01 +scene0204_02 +scene0205_00 +scene0205_01 +scene0205_02 +scene0206_00 +scene0206_01 +scene0206_02 +scene0209_00 +scene0209_01 +scene0209_02 +scene0210_00 +scene0210_01 +scene0211_00 +scene0211_01 +scene0211_02 +scene0211_03 +scene0212_00 +scene0212_01 +scene0212_02 
+scene0213_00 +scene0214_00 +scene0214_01 +scene0214_02 +scene0215_00 +scene0215_01 +scene0216_00 +scene0218_00 +scene0218_01 +scene0219_00 +scene0220_00 +scene0220_01 +scene0220_02 +scene0223_00 +scene0223_01 +scene0223_02 +scene0224_00 +scene0225_00 +scene0226_00 +scene0226_01 +scene0227_00 +scene0228_00 +scene0229_00 +scene0229_01 +scene0229_02 +scene0230_00 +scene0232_00 +scene0232_01 +scene0232_02 +scene0233_00 +scene0233_01 +scene0234_00 +scene0235_00 +scene0236_00 +scene0236_01 +scene0237_00 +scene0237_01 +scene0238_00 +scene0238_01 +scene0239_00 +scene0239_01 +scene0239_02 +scene0240_00 +scene0241_00 +scene0241_01 +scene0241_02 +scene0242_00 +scene0242_01 +scene0242_02 +scene0243_00 +scene0244_00 +scene0244_01 +scene0245_00 +scene0247_00 +scene0247_01 +scene0248_00 +scene0248_01 +scene0248_02 +scene0250_00 +scene0250_01 +scene0250_02 +scene0252_00 +scene0253_00 +scene0254_00 +scene0254_01 +scene0255_00 +scene0255_01 +scene0255_02 +scene0258_00 +scene0259_00 +scene0259_01 +scene0260_00 +scene0260_01 +scene0260_02 +scene0261_00 +scene0261_01 +scene0261_02 +scene0261_03 +scene0262_00 +scene0262_01 +scene0263_00 +scene0263_01 +scene0264_00 +scene0264_01 +scene0264_02 +scene0265_00 +scene0265_01 +scene0265_02 +scene0266_00 +scene0266_01 +scene0267_00 +scene0268_00 +scene0268_01 +scene0268_02 +scene0269_00 +scene0269_01 +scene0269_02 +scene0270_00 +scene0270_01 +scene0270_02 +scene0271_00 +scene0271_01 +scene0272_00 +scene0272_01 +scene0273_00 +scene0273_01 +scene0274_00 +scene0274_01 +scene0274_02 +scene0275_00 +scene0276_00 +scene0276_01 +scene0279_00 +scene0279_01 +scene0279_02 +scene0280_00 +scene0280_01 +scene0280_02 +scene0281_00 +scene0282_00 +scene0282_01 +scene0282_02 +scene0283_00 +scene0284_00 +scene0285_00 +scene0286_00 +scene0286_01 +scene0286_02 +scene0286_03 +scene0287_00 +scene0288_00 +scene0288_01 +scene0288_02 +scene0289_00 +scene0289_01 +scene0290_00 +scene0291_00 +scene0291_01 +scene0291_02 +scene0292_00 +scene0292_01 +scene0293_00 +scene0293_01 +scene0294_00 +scene0294_01 +scene0294_02 +scene0295_00 +scene0295_01 +scene0296_00 +scene0296_01 +scene0297_00 +scene0297_01 +scene0297_02 +scene0298_00 +scene0299_00 +scene0299_01 +scene0301_00 +scene0301_01 +scene0301_02 +scene0302_00 +scene0302_01 +scene0303_00 +scene0303_01 +scene0303_02 +scene0305_00 +scene0305_01 +scene0306_00 +scene0306_01 +scene0308_00 +scene0309_00 +scene0309_01 +scene0310_00 +scene0310_01 +scene0310_02 +scene0311_00 +scene0312_00 +scene0312_01 +scene0312_02 +scene0313_00 +scene0313_01 +scene0313_02 +scene0315_00 +scene0317_00 +scene0317_01 +scene0318_00 +scene0319_00 +scene0320_00 +scene0320_01 +scene0320_02 +scene0320_03 +scene0321_00 +scene0322_00 +scene0323_00 +scene0323_01 +scene0324_00 +scene0324_01 +scene0325_00 +scene0325_01 +scene0326_00 +scene0327_00 +scene0330_00 +scene0331_00 +scene0331_01 +scene0332_00 +scene0332_01 +scene0332_02 +scene0333_00 +scene0335_00 +scene0335_01 +scene0335_02 +scene0336_00 +scene0336_01 +scene0337_00 +scene0337_01 +scene0337_02 +scene0339_00 +scene0340_00 +scene0340_01 +scene0340_02 +scene0341_00 +scene0341_01 +scene0344_00 +scene0344_01 +scene0345_00 +scene0345_01 +scene0346_00 +scene0346_01 +scene0347_00 +scene0347_01 +scene0347_02 +scene0348_00 +scene0348_01 +scene0348_02 +scene0349_00 +scene0349_01 +scene0350_00 +scene0350_01 +scene0350_02 +scene0352_00 +scene0352_01 +scene0352_02 +scene0358_00 +scene0358_01 +scene0358_02 +scene0359_00 +scene0359_01 +scene0360_00 +scene0361_00 +scene0361_01 +scene0361_02 +scene0362_00 +scene0362_01 +scene0362_02 
+scene0362_03 +scene0363_00 +scene0364_00 +scene0364_01 +scene0365_00 +scene0365_01 +scene0365_02 +scene0366_00 +scene0367_00 +scene0367_01 +scene0368_00 +scene0368_01 +scene0369_00 +scene0369_01 +scene0369_02 +scene0370_00 +scene0370_01 +scene0370_02 +scene0371_00 +scene0371_01 +scene0372_00 +scene0373_00 +scene0373_01 +scene0374_00 +scene0375_00 +scene0375_01 +scene0375_02 +scene0376_00 +scene0376_01 +scene0376_02 +scene0379_00 +scene0380_00 +scene0380_01 +scene0380_02 +scene0381_00 +scene0381_01 +scene0381_02 +scene0383_00 +scene0383_01 +scene0383_02 +scene0384_00 +scene0385_00 +scene0385_01 +scene0385_02 +scene0386_00 +scene0387_00 +scene0387_01 +scene0387_02 +scene0388_00 +scene0388_01 +scene0390_00 +scene0391_00 +scene0392_00 +scene0392_01 +scene0392_02 +scene0393_00 +scene0393_01 +scene0393_02 +scene0394_00 +scene0394_01 +scene0395_00 +scene0395_01 +scene0395_02 +scene0396_00 +scene0396_01 +scene0396_02 +scene0397_00 +scene0397_01 +scene0398_00 +scene0398_01 +scene0399_00 +scene0399_01 +scene0400_00 +scene0400_01 +scene0401_00 +scene0402_00 +scene0403_00 +scene0403_01 +scene0404_00 +scene0404_01 +scene0404_02 +scene0405_00 +scene0407_00 +scene0407_01 +scene0408_00 +scene0408_01 +scene0409_00 +scene0409_01 +scene0410_00 +scene0410_01 +scene0411_00 +scene0411_01 +scene0411_02 +scene0413_00 +scene0415_00 +scene0415_01 +scene0415_02 +scene0416_00 +scene0416_01 +scene0416_02 +scene0416_03 +scene0416_04 +scene0417_00 +scene0418_00 +scene0418_01 +scene0418_02 +scene0419_00 +scene0419_01 +scene0419_02 +scene0420_00 +scene0420_01 +scene0420_02 +scene0421_00 +scene0421_01 +scene0421_02 +scene0422_00 +scene0424_00 +scene0424_01 +scene0424_02 +scene0425_00 +scene0425_01 +scene0428_00 +scene0428_01 +scene0429_00 +scene0431_00 +scene0433_00 +scene0434_00 +scene0434_01 +scene0434_02 +scene0436_00 +scene0437_00 +scene0437_01 +scene0438_00 +scene0439_00 +scene0439_01 +scene0440_00 +scene0440_01 +scene0440_02 +scene0442_00 +scene0443_00 +scene0444_00 +scene0444_01 +scene0445_00 +scene0445_01 +scene0446_00 +scene0446_01 +scene0447_00 +scene0447_01 +scene0447_02 +scene0448_00 +scene0448_01 +scene0448_02 +scene0449_00 +scene0449_01 +scene0449_02 +scene0450_00 +scene0451_00 +scene0451_01 +scene0451_02 +scene0451_03 +scene0451_04 +scene0451_05 +scene0452_00 +scene0452_01 +scene0452_02 +scene0453_00 +scene0453_01 +scene0454_00 +scene0455_00 +scene0456_00 +scene0456_01 +scene0457_00 +scene0457_01 +scene0457_02 +scene0459_00 +scene0459_01 +scene0460_00 +scene0463_00 +scene0463_01 +scene0464_00 +scene0465_00 +scene0465_01 +scene0466_00 +scene0466_01 +scene0467_00 +scene0468_00 +scene0468_01 +scene0468_02 +scene0469_00 +scene0469_01 +scene0469_02 +scene0470_00 +scene0470_01 +scene0471_00 +scene0471_01 +scene0471_02 +scene0472_00 +scene0472_01 +scene0472_02 +scene0473_00 +scene0473_01 +scene0475_00 +scene0475_01 +scene0475_02 +scene0476_00 +scene0476_01 +scene0476_02 +scene0477_00 +scene0477_01 +scene0478_00 +scene0478_01 +scene0479_00 +scene0479_01 +scene0479_02 +scene0480_00 +scene0480_01 +scene0481_00 +scene0481_01 +scene0482_00 +scene0482_01 +scene0483_00 +scene0484_00 +scene0484_01 +scene0485_00 +scene0486_00 +scene0487_00 +scene0487_01 +scene0489_00 +scene0489_01 +scene0489_02 +scene0491_00 +scene0492_00 +scene0492_01 +scene0493_00 +scene0493_01 +scene0495_00 +scene0497_00 +scene0498_00 +scene0498_01 +scene0498_02 +scene0499_00 +scene0501_00 +scene0501_01 +scene0501_02 +scene0502_00 +scene0502_01 +scene0502_02 +scene0503_00 +scene0504_00 +scene0505_00 +scene0505_01 +scene0505_02 +scene0505_03 
+scene0505_04 +scene0506_00 +scene0507_00 +scene0508_00 +scene0508_01 +scene0508_02 +scene0509_00 +scene0509_01 +scene0509_02 +scene0510_00 +scene0510_01 +scene0510_02 +scene0511_00 +scene0511_01 +scene0512_00 +scene0513_00 +scene0514_00 +scene0514_01 +scene0515_00 +scene0515_01 +scene0515_02 +scene0516_00 +scene0516_01 +scene0517_00 +scene0517_01 +scene0517_02 +scene0519_00 +scene0520_00 +scene0520_01 +scene0521_00 +scene0522_00 +scene0523_00 +scene0523_01 +scene0523_02 +scene0524_00 +scene0524_01 +scene0525_00 +scene0525_01 +scene0525_02 +scene0526_00 +scene0526_01 +scene0528_00 +scene0528_01 +scene0529_00 +scene0529_01 +scene0529_02 +scene0530_00 +scene0531_00 +scene0532_00 +scene0532_01 +scene0533_00 +scene0533_01 +scene0534_00 +scene0534_01 +scene0536_00 +scene0536_01 +scene0536_02 +scene0537_00 +scene0538_00 +scene0539_00 +scene0539_01 +scene0539_02 +scene0540_00 +scene0540_01 +scene0540_02 +scene0541_00 +scene0541_01 +scene0541_02 +scene0542_00 +scene0543_00 +scene0543_01 +scene0543_02 +scene0544_00 +scene0545_00 +scene0545_01 +scene0545_02 +scene0546_00 +scene0547_00 +scene0547_01 +scene0547_02 +scene0548_00 +scene0548_01 +scene0548_02 +scene0551_00 +scene0554_00 +scene0554_01 +scene0555_00 +scene0556_00 +scene0556_01 +scene0557_00 +scene0557_01 +scene0557_02 +scene0560_00 +scene0561_00 +scene0561_01 +scene0562_00 +scene0563_00 +scene0564_00 +scene0566_00 +scene0567_00 +scene0567_01 +scene0569_00 +scene0569_01 +scene0570_00 +scene0570_01 +scene0570_02 +scene0571_00 +scene0571_01 +scene0572_00 +scene0572_01 +scene0572_02 +scene0573_00 +scene0573_01 +scene0576_00 +scene0576_01 +scene0576_02 +scene0577_00 +scene0579_00 +scene0579_01 +scene0579_02 +scene0581_00 +scene0581_01 +scene0581_02 +scene0582_00 +scene0582_01 +scene0582_02 +scene0584_00 +scene0584_01 +scene0584_02 +scene0585_00 +scene0585_01 +scene0586_00 +scene0586_01 +scene0586_02 +scene0587_00 +scene0587_01 +scene0587_02 +scene0587_03 +scene0588_00 +scene0588_01 +scene0588_02 +scene0588_03 +scene0589_00 +scene0589_01 +scene0589_02 +scene0590_00 +scene0590_01 +scene0592_00 +scene0592_01 +scene0594_00 +scene0596_00 +scene0596_01 +scene0596_02 +scene0597_00 +scene0597_01 +scene0597_02 +scene0600_00 +scene0600_01 +scene0600_02 +scene0601_00 +scene0601_01 +scene0602_00 +scene0603_00 +scene0603_01 +scene0604_00 +scene0604_01 +scene0604_02 +scene0605_00 +scene0605_01 +scene0610_00 +scene0610_01 +scene0610_02 +scene0611_00 +scene0611_01 +scene0612_00 +scene0612_01 +scene0613_00 +scene0613_01 +scene0613_02 +scene0614_00 +scene0614_01 +scene0614_02 +scene0615_00 +scene0615_01 +scene0617_00 +scene0619_00 +scene0620_00 +scene0620_01 +scene0622_00 +scene0622_01 +scene0623_00 +scene0623_01 +scene0624_00 +scene0625_00 +scene0625_01 +scene0626_00 +scene0626_01 +scene0626_02 +scene0627_00 +scene0627_01 +scene0628_00 +scene0628_01 +scene0628_02 +scene0630_00 +scene0630_01 +scene0630_02 +scene0630_03 +scene0630_04 +scene0630_05 +scene0630_06 +scene0631_00 +scene0631_01 +scene0631_02 +scene0632_00 +scene0634_00 +scene0635_00 +scene0635_01 +scene0636_00 +scene0637_00 +scene0638_00 +scene0639_00 +scene0640_00 +scene0640_01 +scene0640_02 +scene0641_00 +scene0642_00 +scene0642_01 +scene0642_02 +scene0642_03 +scene0646_00 +scene0646_01 +scene0646_02 +scene0649_00 +scene0649_01 +scene0650_00 +scene0654_00 +scene0654_01 +scene0656_00 +scene0656_01 +scene0656_02 +scene0656_03 +scene0657_00 +scene0659_00 +scene0659_01 +scene0661_00 +scene0662_00 +scene0662_01 +scene0662_02 +scene0666_00 +scene0666_01 +scene0666_02 +scene0667_00 +scene0667_01 
+scene0667_02 +scene0668_00 +scene0669_00 +scene0669_01 +scene0672_00 +scene0672_01 +scene0673_00 +scene0673_01 +scene0673_02 +scene0673_03 +scene0673_04 +scene0673_05 +scene0674_00 +scene0674_01 +scene0675_00 +scene0675_01 +scene0676_00 +scene0676_01 +scene0677_00 +scene0677_01 +scene0677_02 +scene0679_00 +scene0679_01 +scene0680_00 +scene0680_01 +scene0681_00 +scene0682_00 +scene0683_00 +scene0687_00 +scene0688_00 +scene0691_00 +scene0691_01 +scene0692_00 +scene0692_01 +scene0692_02 +scene0692_03 +scene0692_04 +scene0694_00 +scene0694_01 +scene0698_00 +scene0698_01 +scene0703_00 +scene0703_01 +scene0705_00 +scene0705_01 +scene0705_02 +scene0706_00 diff --git a/models/Scanrefer/data/scannet/meta_data/scannetv2_val.txt b/models/Scanrefer/data/scannet/meta_data/scannetv2_val.txt new file mode 100644 index 0000000..36e02b3 --- /dev/null +++ b/models/Scanrefer/data/scannet/meta_data/scannetv2_val.txt @@ -0,0 +1,312 @@ +scene0011_00 +scene0011_01 +scene0015_00 +scene0019_00 +scene0019_01 +scene0025_00 +scene0025_01 +scene0025_02 +scene0030_00 +scene0030_01 +scene0030_02 +scene0046_00 +scene0046_01 +scene0046_02 +scene0050_00 +scene0050_01 +scene0050_02 +scene0063_00 +scene0064_00 +scene0064_01 +scene0077_00 +scene0077_01 +scene0081_00 +scene0081_01 +scene0081_02 +scene0084_00 +scene0084_01 +scene0084_02 +scene0086_00 +scene0086_01 +scene0086_02 +scene0088_00 +scene0088_01 +scene0088_02 +scene0088_03 +scene0095_00 +scene0095_01 +scene0100_00 +scene0100_01 +scene0100_02 +scene0131_00 +scene0131_01 +scene0131_02 +scene0139_00 +scene0144_00 +scene0144_01 +scene0146_00 +scene0146_01 +scene0146_02 +scene0149_00 +scene0153_00 +scene0153_01 +scene0164_00 +scene0164_01 +scene0164_02 +scene0164_03 +scene0169_00 +scene0169_01 +scene0187_00 +scene0187_01 +scene0193_00 +scene0193_01 +scene0196_00 +scene0203_00 +scene0203_01 +scene0203_02 +scene0207_00 +scene0207_01 +scene0207_02 +scene0208_00 +scene0217_00 +scene0221_00 +scene0221_01 +scene0222_00 +scene0222_01 +scene0231_00 +scene0231_01 +scene0231_02 +scene0246_00 +scene0249_00 +scene0251_00 +scene0256_00 +scene0256_01 +scene0256_02 +scene0257_00 +scene0277_00 +scene0277_01 +scene0277_02 +scene0278_00 +scene0278_01 +scene0300_00 +scene0300_01 +scene0304_00 +scene0307_00 +scene0307_01 +scene0307_02 +scene0314_00 +scene0316_00 +scene0328_00 +scene0329_00 +scene0329_01 +scene0329_02 +scene0334_00 +scene0334_01 +scene0334_02 +scene0338_00 +scene0338_01 +scene0338_02 +scene0342_00 +scene0343_00 +scene0351_00 +scene0351_01 +scene0353_00 +scene0353_01 +scene0353_02 +scene0354_00 +scene0355_00 +scene0355_01 +scene0356_00 +scene0356_01 +scene0356_02 +scene0357_00 +scene0357_01 +scene0377_00 +scene0377_01 +scene0377_02 +scene0378_00 +scene0378_01 +scene0378_02 +scene0382_00 +scene0382_01 +scene0389_00 +scene0406_00 +scene0406_01 +scene0406_02 +scene0412_00 +scene0412_01 +scene0414_00 +scene0423_00 +scene0423_01 +scene0423_02 +scene0426_00 +scene0426_01 +scene0426_02 +scene0426_03 +scene0427_00 +scene0430_00 +scene0430_01 +scene0432_00 +scene0432_01 +scene0435_00 +scene0435_01 +scene0435_02 +scene0435_03 +scene0441_00 +scene0458_00 +scene0458_01 +scene0461_00 +scene0462_00 +scene0474_00 +scene0474_01 +scene0474_02 +scene0474_03 +scene0474_04 +scene0474_05 +scene0488_00 +scene0488_01 +scene0490_00 +scene0494_00 +scene0496_00 +scene0500_00 +scene0500_01 +scene0518_00 +scene0527_00 +scene0535_00 +scene0549_00 +scene0549_01 +scene0550_00 +scene0552_00 +scene0552_01 +scene0553_00 +scene0553_01 +scene0553_02 +scene0558_00 +scene0558_01 +scene0558_02 +scene0559_00 
+scene0559_01 +scene0559_02 +scene0565_00 +scene0568_00 +scene0568_01 +scene0568_02 +scene0574_00 +scene0574_01 +scene0574_02 +scene0575_00 +scene0575_01 +scene0575_02 +scene0578_00 +scene0578_01 +scene0578_02 +scene0580_00 +scene0580_01 +scene0583_00 +scene0583_01 +scene0583_02 +scene0591_00 +scene0591_01 +scene0591_02 +scene0593_00 +scene0593_01 +scene0595_00 +scene0598_00 +scene0598_01 +scene0598_02 +scene0599_00 +scene0599_01 +scene0599_02 +scene0606_00 +scene0606_01 +scene0606_02 +scene0607_00 +scene0607_01 +scene0608_00 +scene0608_01 +scene0608_02 +scene0609_00 +scene0609_01 +scene0609_02 +scene0609_03 +scene0616_00 +scene0616_01 +scene0618_00 +scene0621_00 +scene0629_00 +scene0629_01 +scene0629_02 +scene0633_00 +scene0633_01 +scene0643_00 +scene0644_00 +scene0645_00 +scene0645_01 +scene0645_02 +scene0647_00 +scene0647_01 +scene0648_00 +scene0648_01 +scene0651_00 +scene0651_01 +scene0651_02 +scene0652_00 +scene0653_00 +scene0653_01 +scene0655_00 +scene0655_01 +scene0655_02 +scene0658_00 +scene0660_00 +scene0663_00 +scene0663_01 +scene0663_02 +scene0664_00 +scene0664_01 +scene0664_02 +scene0665_00 +scene0665_01 +scene0670_00 +scene0670_01 +scene0671_00 +scene0671_01 +scene0678_00 +scene0678_01 +scene0678_02 +scene0684_00 +scene0684_01 +scene0685_00 +scene0685_01 +scene0685_02 +scene0686_00 +scene0686_01 +scene0686_02 +scene0689_00 +scene0690_00 +scene0690_01 +scene0693_00 +scene0693_01 +scene0693_02 +scene0695_00 +scene0695_01 +scene0695_02 +scene0695_03 +scene0696_00 +scene0696_01 +scene0696_02 +scene0697_00 +scene0697_01 +scene0697_02 +scene0697_03 +scene0699_00 +scene0700_00 +scene0700_01 +scene0700_02 +scene0701_00 +scene0701_01 +scene0701_02 +scene0702_00 +scene0702_01 +scene0702_02 +scene0704_00 +scene0704_01 diff --git a/models/Scanrefer/data/scannet/model_util_scannet.py b/models/Scanrefer/data/scannet/model_util_scannet.py new file mode 100644 index 0000000..af0b3c0 --- /dev/null +++ b/models/Scanrefer/data/scannet/model_util_scannet.py @@ -0,0 +1,175 @@ +"""Modified from: https://github.com/facebookresearch/votenet/blob/master/scann +et/model_util_scannet.py.""" + +import os +import sys + +import numpy as np + +sys.path.append(os.path.join(os.getcwd(), os.pardir, + 'lib')) # HACK add the lib folder +from lib.config import CONF +from utils.box_util import get_3d_box + + +def in_hull(p, hull): + from scipy.spatial import Delaunay + if not isinstance(hull, Delaunay): + hull = Delaunay(hull) + return hull.find_simplex(p) >= 0 + + +def extract_pc_in_box3d(pc, box3d): + ''' pc: (N,3), box3d: (8,3) ''' + box3d_roi_inds = in_hull(pc[:, 0:3], box3d) + return pc[box3d_roi_inds, :], box3d_roi_inds + + +def rotate_aligned_boxes(input_boxes, rot_mat): + centers, lengths = input_boxes[:, 0:3], input_boxes[:, 3:6] + new_centers = np.dot(centers, np.transpose(rot_mat)) + + dx, dy = lengths[:, 0] / 2.0, lengths[:, 1] / 2.0 + new_x = np.zeros((dx.shape[0], 4)) + new_y = np.zeros((dx.shape[0], 4)) + + for i, crnr in enumerate([(-1, -1), (1, -1), (1, 1), (-1, 1)]): + crnrs = np.zeros((dx.shape[0], 3)) + crnrs[:, 0] = crnr[0] * dx + crnrs[:, 1] = crnr[1] * dy + crnrs = np.dot(crnrs, np.transpose(rot_mat)) + new_x[:, i] = crnrs[:, 0] + new_y[:, i] = crnrs[:, 1] + + new_dx = 2.0 * np.max(new_x, 1) + new_dy = 2.0 * np.max(new_y, 1) + new_lengths = np.stack((new_dx, new_dy, lengths[:, 2]), axis=1) + + return np.concatenate([new_centers, new_lengths], axis=1) + + +def rotate_aligned_boxes_along_axis(input_boxes, rot_mat, axis): + centers, lengths = input_boxes[:, 0:3], input_boxes[:, 3:6] + 
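# --- Editor's note: an illustrative, standalone sketch (not part of the
# original patch) of the corner-projection trick used by rotate_aligned_boxes
# above and by the axis variant being defined here: rotate the four corner
# offsets of each axis-aligned box and take the largest projection per axis
# as the new half-extent. All names below are local to this sketch.
import numpy as np

t = np.pi / 6
rot = np.array([[np.cos(t), -np.sin(t), 0.0],
                [np.sin(t), np.cos(t), 0.0],
                [0.0, 0.0, 1.0]])
dx, dy = 1.0, 0.5                      # half sizes of a 2 x 1 box
corners = np.array([(-1.0, -1.0), (1.0, -1.0), (1.0, 1.0), (-1.0, 1.0)])
offsets = np.c_[corners * [dx, dy], np.zeros(4)]    # (4, 3) corner offsets
rotated = offsets @ rot.T
new_dx, new_dy = 2.0 * rotated[:, 0].max(), 2.0 * rotated[:, 1].max()
assert new_dx > 2.0 and new_dy > 1.0   # the axis-aligned box grows to cover the rotated corners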
new_centers = np.dot(centers, np.transpose(rot_mat))
+
+    if axis == 'x':
+        d1, d2 = lengths[:, 1] / 2.0, lengths[:, 2] / 2.0
+    elif axis == 'y':
+        d1, d2 = lengths[:, 0] / 2.0, lengths[:, 2] / 2.0
+    else:
+        d1, d2 = lengths[:, 0] / 2.0, lengths[:, 1] / 2.0
+
+    new_1 = np.zeros((d1.shape[0], 4))
+    new_2 = np.zeros((d1.shape[0], 4))
+
+    for i, crnr in enumerate([(-1, -1), (1, -1), (1, 1), (-1, 1)]):
+        crnrs = np.zeros((d1.shape[0], 3))
+        crnrs[:, 0] = crnr[0] * d1
+        crnrs[:, 1] = crnr[1] * d2
+        crnrs = np.dot(crnrs, np.transpose(rot_mat))
+        new_1[:, i] = crnrs[:, 0]
+        new_2[:, i] = crnrs[:, 1]
+
+    new_d1 = 2.0 * np.max(new_1, 1)
+    new_d2 = 2.0 * np.max(new_2, 1)
+
+    if axis == 'x':
+        new_lengths = np.stack((lengths[:, 0], new_d1, new_d2), axis=1)
+    elif axis == 'y':
+        new_lengths = np.stack((new_d1, lengths[:, 1], new_d2), axis=1)
+    else:
+        new_lengths = np.stack((new_d1, new_d2, lengths[:, 2]), axis=1)
+
+    return np.concatenate([new_centers, new_lengths], axis=1)
+
+
+class ScannetDatasetConfig(object):
+
+    def __init__(self):
+
+        from data.scannet.meta_data.es_type_id import es_type_dict
+
+        self.type2class = es_type_dict
+        self.type2class = {k: v - 1 for k, v in self.type2class.items()}
+        # self.type2class = {'cabinet':0, 'bed':1, 'chair':2, 'sofa':3, 'table':4, 'door':5,
+        # 'window':6,'bookshelf':7,'picture':8, 'counter':9, 'desk':10, 'curtain':11,
+        # 'refrigerator':12, 'shower curtain':13, 'toilet':14, 'sink':15, 'bathtub':16, 'others':17}
+        self.class2type = {self.type2class[t]: t for t in self.type2class}
+
+        self.nyu40id2class = {i: i for i in range(300)}  # just itself
+
+        self.mean_size_arr = np.load(
+            f'{os.getcwd()}/data/scannet/meta_data/mean_size_array.npy')
+        self.num_class = len(self.type2class.keys())
+        self.num_heading_bin = 1
+        self.num_size_cluster = len(self.type2class.keys())
+
+        self.type_mean_size = {}
+        for i in range(self.num_size_cluster):
+            self.type_mean_size[self.class2type[i]] = self.mean_size_arr[i, :]
+
+    def angle2class(self, angle):
+        """Convert continuous angle to discrete class.
+
+        [optional] also returns a small regression number from the
+        class center angle to the current angle.
+
+        angle is from 0-2pi (or -pi~pi), class center at 0, 1*(2pi/N), 2*(2pi/N) ... (N-1)*(2pi/N)
+        return is class of int32 of 0,1,...,N-1 and a number such that
+        class*(2pi/N) + number = angle
+
+        NOT USED.
+        """
+        assert (False)
+
+    def class2angle(self, pred_cls, residual, to_label_format=True):
+        """Inverse function to angle2class.
+
+        As ScanNet only has axis-aligned boxes, angles are always 0.
+        """
+        return 0
+
+    def class2angle_batch(self, pred_cls, residual, to_label_format=True):
+        """Inverse function to angle2class.
+
+        As ScanNet only has axis-aligned boxes, angles are always 0.
+ """ + return np.zeros(pred_cls.shape[0]) + + def size2class(self, size, type_name): + """Convert 3D box size (l,w,h) to size class and size residual.""" + size_class = self.type2class[type_name] + size_residual = size - self.type_mean_size[type_name] + return size_class, size_residual + + def class2size(self, pred_cls, residual): + """Inverse function to size2class.""" + return self.mean_size_arr[pred_cls] + residual + + def class2size_batch(self, pred_cls, residual): + """Inverse function to size2class.""" + return self.mean_size_arr[pred_cls] + residual + + def param2obb(self, center, heading_class, heading_residual, size_class, + size_residual): + heading_angle = self.class2angle(heading_class, heading_residual) + box_size = self.class2size(int(size_class), size_residual) + obb = np.zeros((7, )) + obb[0:3] = center + obb[3:6] = box_size + obb[6] = heading_angle * -1 + return obb + + def param2obb_batch(self, center, heading_class, heading_residual, + size_class, size_residual): + heading_angle = self.class2angle_batch(heading_class, heading_residual) + box_size = self.class2size_batch(size_class, size_residual) + obb = np.zeros((heading_class.shape[0], 7)) + obb[:, 0:3] = center + obb[:, 3:6] = box_size + obb[:, 6] = heading_angle * -1 + return obb + + +if __name__ == '__main__': + showing = ScannetDatasetConfig() diff --git a/models/Scanrefer/data/scannet/scannet_utils.py b/models/Scanrefer/data/scannet/scannet_utils.py new file mode 100644 index 0000000..996f675 --- /dev/null +++ b/models/Scanrefer/data/scannet/scannet_utils.py @@ -0,0 +1,125 @@ +"""Modified from: https://github.com/facebookresearch/votenet/blob/master/scann +et/scannet_utils.py.""" + +import csv +import json +import os +import sys + +import numpy as np + +try: + from plyfile import PlyData, PlyElement +except: + print("Please install the module 'plyfile' for PLY i/o, e.g.") + print('pip install plyfile') + sys.exit(-1) + + +def normalize_v3(arr): + """Normalize a numpy array of 3 component vectors shape=(n,3)""" + lens = np.sqrt(arr[:, 0]**2 + arr[:, 1]**2 + arr[:, 2]**2) + arr[:, 0] /= (lens + 1e-8) + arr[:, 1] /= (lens + 1e-8) + arr[:, 2] /= (lens + 1e-8) + return arr + + +def compute_normal(vertices, faces): + #Create a zeroed array with the same type and shape as our vertices i.e., per vertex normal + normals = np.zeros(vertices.shape, dtype=vertices.dtype) + #Create an indexed view into the vertex array using the array of three indices for triangles + tris = vertices[faces] + #Calculate the normal for all the triangles, by taking the cross product of the vectors v1-v0, and v2-v0 in each triangle + n = np.cross(tris[::, 1] - tris[::, 0], tris[::, 2] - tris[::, 0]) + # n is now an array of normals per triangle. The length of each normal is dependent the vertices, + # we need to normalize these, so that our next step weights each normal equally. + normalize_v3(n) + # now we have a normalized array of normals, one per triangle, i.e., per triangle normals. + # But instead of one per triangle (i.e., flat shading), we add to each vertex in that triangle, + # the triangles' normal. Multiple triangles would then contribute to every vertex, so we need to normalize again afterwards. 
+ # The cool part, we can actually add the normals through an indexed view of our (zeroed) per vertex normal array + normals[faces[:, 0]] += n + normals[faces[:, 1]] += n + normals[faces[:, 2]] += n + normalize_v3(normals) + + return normals + + +def represents_int(s): + """if string s represents an int.""" + try: + int(s) + return True + except ValueError: + return False + + +def read_label_mapping(filename, + label_from='raw_category', + label_to='nyu40id'): + assert os.path.isfile(filename) + mapping = dict() + with open(filename) as csvfile: + reader = csv.DictReader(csvfile, delimiter='\t') + for row in reader: + mapping[row[label_from]] = int(row[label_to]) + if represents_int(list(mapping.keys())[0]): + mapping = {int(k): v for k, v in mapping.items()} + return mapping + + +def read_mesh_vertices(filename): + """read XYZ for each vertex.""" + assert os.path.isfile(filename) + with open(filename, 'rb') as f: + plydata = PlyData.read(f) + num_verts = plydata['vertex'].count + vertices = np.zeros(shape=[num_verts, 3], dtype=np.float32) + vertices[:, 0] = plydata['vertex'].data['x'] + vertices[:, 1] = plydata['vertex'].data['y'] + vertices[:, 2] = plydata['vertex'].data['z'] + return vertices + + +def read_mesh_vertices_rgb(filename): + """read XYZ RGB for each vertex. + + Note: RGB values are in 0-255 + """ + assert os.path.isfile(filename) + with open(filename, 'rb') as f: + plydata = PlyData.read(f) + num_verts = plydata['vertex'].count + vertices = np.zeros(shape=[num_verts, 6], dtype=np.float32) + vertices[:, 0] = plydata['vertex'].data['x'] + vertices[:, 1] = plydata['vertex'].data['y'] + vertices[:, 2] = plydata['vertex'].data['z'] + vertices[:, 3] = plydata['vertex'].data['red'] + vertices[:, 4] = plydata['vertex'].data['green'] + vertices[:, 5] = plydata['vertex'].data['blue'] + return vertices + + +def read_mesh_vertices_rgb_normal(filename): + """read XYZ RGB normals point cloud from filename PLY file.""" + assert (os.path.isfile(filename)) + with open(filename, 'rb') as f: + plydata = PlyData.read(f) + num_verts = plydata['vertex'].count + vertices = np.zeros(shape=[num_verts, 9], dtype=np.float32) + vertices[:, 0] = plydata['vertex'].data['x'] + vertices[:, 1] = plydata['vertex'].data['y'] + vertices[:, 2] = plydata['vertex'].data['z'] + vertices[:, 3] = plydata['vertex'].data['red'] + vertices[:, 4] = plydata['vertex'].data['green'] + vertices[:, 5] = plydata['vertex'].data['blue'] + + # compute normals + xyz = np.array([[x, y, z] + for x, y, z, _, _, _, _ in plydata['vertex'].data]) + face = np.array([f[0] for f in plydata['face'].data]) + nxnynz = compute_normal(xyz, face) + vertices[:, 6:] = nxnynz + return vertices diff --git a/models/Scanrefer/data/scannet/visualize.py b/models/Scanrefer/data/scannet/visualize.py new file mode 100644 index 0000000..d36981c --- /dev/null +++ b/models/Scanrefer/data/scannet/visualize.py @@ -0,0 +1,29 @@ +import argparse +import os + +import numpy as np + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scene_id', + type=str, + help='scene id of scene to be visualized', + default='scene0000_00') + args = parser.parse_args() + + verts = np.load('scannet_data/{}_vert.npy'.format(args.scene_id)) + aligned_verts = np.load('scannet_data/{}_aligned_vert.npy'.format( + args.scene_id)) + + with open('scannet_data/{}_verts.obj'.format(args.scene_id), 'w') as f: + for i in range(verts.shape[0]): + f.write('v {} {} {} {} {} {}\n'.format(verts[i, 0], verts[i, 1], + verts[i, 2], verts[i, 3], + verts[i, 4], 
verts[i, 5])) + + with open('scannet_data/{}_aligned_verts.obj'.format(args.scene_id), + 'w') as f: + for i in range(aligned_verts.shape[0]): + f.write('v {} {} {} {} {} {}\n'.format( + aligned_verts[i, 0], aligned_verts[i, 1], aligned_verts[i, 2], + aligned_verts[i, 3], aligned_verts[i, 4], aligned_verts[i, 5])) diff --git a/models/Scanrefer/eval.sh b/models/Scanrefer/eval.sh new file mode 100644 index 0000000..4644267 --- /dev/null +++ b/models/Scanrefer/eval.sh @@ -0,0 +1,2 @@ +python -u scripts/train.py --use_color --eval_only --use_checkpoint "/mnt/petrelfs/linjingli/tmp/code/MMScan-code/VG/benchmark/ScanRefer/output-train-wo-augment/2024-10-17_18-34-43/model_last.pth" +#"/mnt/petrelfs/linjingli/tmp/code/MMScan-code/VG/benchmark/ScanRefer/output-train-wi-augment/2024-10-20_22-53-34/model_last.pth" diff --git a/models/Scanrefer/lib/ap_helper.py b/models/Scanrefer/lib/ap_helper.py new file mode 100644 index 0000000..7da01aa --- /dev/null +++ b/models/Scanrefer/lib/ap_helper.py @@ -0,0 +1,328 @@ +"""Helper functions and class to calculate Average Precisions for 3D object +detection. + +Modified from: https://github.com/facebookresearch/votenet/blob/master/models/ap_helper.py +""" +import os +import sys + +import numpy as np +import torch + +sys.path.append(os.path.join(os.getcwd(), 'lib')) # HACK add the lib folder +from data.scannet.model_util_scannet import extract_pc_in_box3d +from utils.box_util import get_3d_box +from utils.eval_det import eval_det_cls, eval_det_multiprocessing, get_iou_obb +from utils.nms import nms_2d_faster, nms_3d_faster, nms_3d_faster_samecls + + +def flip_axis_to_camera(pc): + """Flip X-right,Y-forward,Z-up to X-right,Y-down,Z-forward Input and output + are both (N,3) array.""" + pc2 = np.copy(pc) + pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # cam X,Y,Z = depth X,-Z,Y + pc2[..., 1] *= -1 + return pc2 + + +def flip_axis_to_depth(pc): + pc2 = np.copy(pc) + pc2[..., [0, 1, 2]] = pc2[..., [0, 2, 1]] # depth X,Y,Z = cam X,Z,-Y + pc2[..., 2] *= -1 + return pc2 + + +def softmax(x): + """Numpy function for softmax.""" + shape = x.shape + probs = np.exp(x - np.max(x, axis=len(shape) - 1, keepdims=True)) + probs /= np.sum(probs, axis=len(shape) - 1, keepdims=True) + return probs + + +def parse_predictions(end_points, config_dict): + """Parse predictions to OBB parameters and suppress overlapping boxes. 
+ + Args: + end_points: dict + {point_clouds, center, heading_scores, heading_residuals, + size_scores, size_residuals, sem_cls_scores} + config_dict: dict + {dataset_config, remove_empty_box, use_3d_nms, nms_iou, + use_old_type_nms, conf_thresh, per_class_proposal} + + Returns: + batch_pred_map_cls: a list of len == batch size (BS) + [pred_list_i], i = 0, 1, ..., BS-1 + where pred_list_i = [(pred_sem_cls, box_params, box_score)_j] + where j = 0, ..., num of valid detections - 1 from sample input i + """ + pred_center = end_points['center'] # B,num_proposal,3 + pred_heading_class = torch.argmax(end_points['heading_scores'], + -1) # B,num_proposal + pred_heading_residual = torch.gather( + end_points['heading_residuals'], 2, + pred_heading_class.unsqueeze(-1)) # B,num_proposal,1 + pred_heading_residual.squeeze_(2) + pred_size_class = torch.argmax(end_points['size_scores'], + -1) # B,num_proposal + pred_size_residual = torch.gather( + end_points['size_residuals'], 2, + pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat( + 1, 1, 1, 3)) # B,num_proposal,1,3 + pred_size_residual.squeeze_(2) + pred_sem_cls = torch.argmax(end_points['sem_cls_scores'], + -1) # B,num_proposal + sem_cls_probs = softmax(end_points['sem_cls_scores'].detach().cpu().numpy( + )) # B,num_proposal,10 + pred_sem_cls_prob = np.max(sem_cls_probs, -1) # B,num_proposal + + num_proposal = pred_center.shape[1] + # Since we operate in upright_depth coord for points, while util functions + # assume upright_camera coord. + bsize = pred_center.shape[0] + pred_corners_3d_upright_camera = np.zeros((bsize, num_proposal, 8, 3)) + # pred_center_upright_camera = flip_axis_to_camera(pred_center.detach().cpu().numpy()) + pred_center_upright_camera = pred_center.detach().cpu().numpy() + for i in range(bsize): + for j in range(num_proposal): + heading_angle = config_dict['dataset_config'].class2angle(\ + pred_heading_class[i,j].detach().cpu().numpy(), pred_heading_residual[i,j].detach().cpu().numpy()) + box_size = config_dict['dataset_config'].class2size(\ + int(pred_size_class[i,j].detach().cpu().numpy()), pred_size_residual[i,j].detach().cpu().numpy()) + corners_3d_upright_camera = get_3d_box( + box_size, heading_angle, pred_center_upright_camera[i, j, :]) + pred_corners_3d_upright_camera[i, j] = corners_3d_upright_camera + + K = pred_center.shape[1] # K==num_proposal + nonempty_box_mask = np.ones((bsize, K)) + + if config_dict['remove_empty_box']: + # ------------------------------------- + # Remove predicted boxes without any point within them.. 
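# --- Editor's note: a standalone sketch (not part of the original patch) of
# the point-in-box test used below; extract_pc_in_box3d / in_hull reduce to a
# Delaunay-based point-in-convex-hull query over the 8 box corners.
import numpy as np
from scipy.spatial import Delaunay
cube = np.array([[x, y, z] for x in (0.0, 1.0) for y in (0.0, 1.0)
                 for z in (0.0, 1.0)])               # (8, 3) box corners
pts = np.array([[0.5, 0.5, 0.5], [2.0, 2.0, 2.0]])
inside = Delaunay(cube).find_simplex(pts) >= 0       # -1 means "outside"
assert inside.tolist() == [True, False]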
+ batch_pc = end_points['point_clouds'].cpu().numpy()[:, :, 0:3] # B,N,3 + for i in range(bsize): + pc = batch_pc[i, :, :] # (N,3) + for j in range(K): + box3d = pred_corners_3d_upright_camera[i, j, :, :] # (8,3) + # box3d = flip_axis_to_depth(box3d) + pc_in_box, inds = extract_pc_in_box3d(pc, box3d) + if len(pc_in_box) < 5: + nonempty_box_mask[i, j] = 0 + # ------------------------------------- + + obj_logits = end_points['objectness_scores'].detach().cpu().numpy() + obj_prob = softmax(obj_logits)[:, :, 1] # (B,K) + if not config_dict['use_3d_nms']: + # ---------- NMS input: pred_with_prob in (B,K,7) ----------- + pred_mask = np.zeros((bsize, K)) + for i in range(bsize): + boxes_2d_with_prob = np.zeros((K, 5)) + for j in range(K): + boxes_2d_with_prob[j, 0] = np.min( + pred_corners_3d_upright_camera[i, j, :, 0]) + boxes_2d_with_prob[j, 2] = np.max( + pred_corners_3d_upright_camera[i, j, :, 0]) + boxes_2d_with_prob[j, 1] = np.min( + pred_corners_3d_upright_camera[i, j, :, 2]) + boxes_2d_with_prob[j, 3] = np.max( + pred_corners_3d_upright_camera[i, j, :, 2]) + boxes_2d_with_prob[j, 4] = obj_prob[i, j] + nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0] + pick = nms_2d_faster( + boxes_2d_with_prob[nonempty_box_mask[i, :] == 1, :], + config_dict['nms_iou'], config_dict['use_old_type_nms']) + assert (len(pick) > 0) + pred_mask[i, nonempty_box_inds[pick]] = 1 + end_points['pred_mask'] = pred_mask + # ---------- NMS output: pred_mask in (B,K) ----------- + elif config_dict['use_3d_nms'] and (not config_dict['cls_nms']): + # ---------- NMS input: pred_with_prob in (B,K,7) ----------- + pred_mask = np.zeros((bsize, K)) + for i in range(bsize): + boxes_3d_with_prob = np.zeros((K, 7)) + for j in range(K): + boxes_3d_with_prob[j, 0] = np.min( + pred_corners_3d_upright_camera[i, j, :, 0]) + boxes_3d_with_prob[j, 1] = np.min( + pred_corners_3d_upright_camera[i, j, :, 1]) + boxes_3d_with_prob[j, 2] = np.min( + pred_corners_3d_upright_camera[i, j, :, 2]) + boxes_3d_with_prob[j, 3] = np.max( + pred_corners_3d_upright_camera[i, j, :, 0]) + boxes_3d_with_prob[j, 4] = np.max( + pred_corners_3d_upright_camera[i, j, :, 1]) + boxes_3d_with_prob[j, 5] = np.max( + pred_corners_3d_upright_camera[i, j, :, 2]) + boxes_3d_with_prob[j, 6] = obj_prob[i, j] + nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0] + pick = nms_3d_faster( + boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :], + config_dict['nms_iou'], config_dict['use_old_type_nms']) + assert (len(pick) > 0) + pred_mask[i, nonempty_box_inds[pick]] = 1 + end_points['pred_mask'] = pred_mask + # ---------- NMS output: pred_mask in (B,K) ----------- + elif config_dict['use_3d_nms'] and config_dict['cls_nms']: + # ---------- NMS input: pred_with_prob in (B,K,8) ----------- + pred_mask = np.zeros((bsize, K)) + for i in range(bsize): + boxes_3d_with_prob = np.zeros((K, 8)) + for j in range(K): + boxes_3d_with_prob[j, 0] = np.min( + pred_corners_3d_upright_camera[i, j, :, 0]) + boxes_3d_with_prob[j, 1] = np.min( + pred_corners_3d_upright_camera[i, j, :, 1]) + boxes_3d_with_prob[j, 2] = np.min( + pred_corners_3d_upright_camera[i, j, :, 2]) + boxes_3d_with_prob[j, 3] = np.max( + pred_corners_3d_upright_camera[i, j, :, 0]) + boxes_3d_with_prob[j, 4] = np.max( + pred_corners_3d_upright_camera[i, j, :, 1]) + boxes_3d_with_prob[j, 5] = np.max( + pred_corners_3d_upright_camera[i, j, :, 2]) + boxes_3d_with_prob[j, 6] = obj_prob[i, j] + boxes_3d_with_prob[j, 7] = pred_sem_cls[ + i, + j] # only suppress if the two boxes are of the same class!! 
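# --- Editor's note: a toy illustration (not part of the original patch) of
# why the class id is appended as an 8th column above: class-aware NMS only
# lets boxes suppress each other when their classes match, so two fully
# overlapping boxes of different classes both survive. This simplified check
# skips the IoU computation because the toy boxes overlap completely.
import numpy as np
boxes = np.array([[0, 0, 0, 1, 1, 1, 0.9, 3.0],      # (min xyz, max xyz, score, cls)
                  [0, 0, 0, 1, 1, 1, 0.8, 7.0]])
keep = [i for i in range(len(boxes))
        if not any(j != i and boxes[j, 7] == boxes[i, 7]
                   and boxes[j, 6] > boxes[i, 6] for j in range(len(boxes)))]
assert keep == [0, 1]                                 # different classes: both kept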
+ nonempty_box_inds = np.where(nonempty_box_mask[i, :] == 1)[0] + pick = nms_3d_faster_samecls( + boxes_3d_with_prob[nonempty_box_mask[i, :] == 1, :], + config_dict['nms_iou'], config_dict['use_old_type_nms']) + assert (len(pick) > 0) + pred_mask[i, nonempty_box_inds[pick]] = 1 + end_points['pred_mask'] = pred_mask + # ---------- NMS output: pred_mask in (B,K) ----------- + + batch_pred_map_cls = [ + ] # a list (len: batch_size) of list (len: num of predictions per sample) of tuples of pred_cls, pred_box and conf (0-1) + for i in range(bsize): + if config_dict['per_class_proposal']: + cur_list = [] + for ii in range(config_dict['dataset_config'].num_class): + cur_list += [(ii, pred_corners_3d_upright_camera[i,j], sem_cls_probs[i,j,ii]*obj_prob[i,j]) \ + for j in range(pred_center.shape[1]) if pred_mask[i,j]==1 and obj_prob[i,j]>config_dict['conf_thresh']] + batch_pred_map_cls.append(cur_list) + else: + batch_pred_map_cls.append([(pred_sem_cls[i,j].item(), pred_corners_3d_upright_camera[i,j], obj_prob[i,j]) \ + for j in range(pred_center.shape[1]) if pred_mask[i,j]==1 and obj_prob[i,j]>config_dict['conf_thresh']]) + end_points['batch_pred_map_cls'] = batch_pred_map_cls + + return batch_pred_map_cls + + +def parse_groundtruths(end_points, config_dict): + """Parse groundtruth labels to OBB parameters. + + Args: + end_points: dict + {center_label, heading_class_label, heading_residual_label, + size_class_label, size_residual_label, sem_cls_label, + box_label_mask} + config_dict: dict + {dataset_config} + + Returns: + batch_gt_map_cls: a list of len == batch_size (BS) + [gt_list_i], i = 0, 1, ..., BS-1 + where gt_list_i = [(gt_sem_cls, gt_box_params)_j] + where j = 0, ..., num of objects - 1 at sample input i + """ + center_label = end_points['center_label'] + heading_class_label = end_points['heading_class_label'] + heading_residual_label = end_points['heading_residual_label'] + size_class_label = end_points['size_class_label'] + size_residual_label = end_points['size_residual_label'] + box_label_mask = end_points['box_label_mask'] + sem_cls_label = end_points['sem_cls_label'] + bsize = center_label.shape[0] + + K2 = center_label.shape[1] # K2==MAX_NUM_OBJ + gt_corners_3d_upright_camera = np.zeros((bsize, K2, 8, 3)) + # gt_center_upright_camera = flip_axis_to_camera(center_label[:,:,0:3].detach().cpu().numpy()) + gt_center_upright_camera = center_label[:, :, 0:3].detach().cpu().numpy() + for i in range(bsize): + for j in range(K2): + if box_label_mask[i, j] == 0: continue + heading_angle = config_dict['dataset_config'].class2angle( + heading_class_label[i, j].detach().cpu().numpy(), + heading_residual_label[i, j].detach().cpu().numpy()) + box_size = config_dict['dataset_config'].class2size( + int(size_class_label[i, j].detach().cpu().numpy()), + size_residual_label[i, j].detach().cpu().numpy()) + corners_3d_upright_camera = get_3d_box( + box_size, heading_angle, gt_center_upright_camera[i, j, :]) + gt_corners_3d_upright_camera[i, j] = corners_3d_upright_camera + + batch_gt_map_cls = [] + for i in range(bsize): + batch_gt_map_cls.append([ + (sem_cls_label[i, j].item(), gt_corners_3d_upright_camera[i, j]) + for j in range(gt_corners_3d_upright_camera.shape[1]) + if box_label_mask[i, j] == 1 + ]) + end_points['batch_gt_map_cls'] = batch_gt_map_cls + + return batch_gt_map_cls + + +class APCalculator(object): + """Calculating Average Precision.""" + + def __init__(self, ap_iou_thresh=0.25, class2type_map=None): + """ + Args: + ap_iou_thresh: float between 0 and 1.0 + IoU threshold to judge whether 
a prediction is positive. + class2type_map: [optional] dict {class_int:class_name} + """ + self.ap_iou_thresh = ap_iou_thresh + self.class2type_map = class2type_map + self.reset() + + def step(self, batch_pred_map_cls, batch_gt_map_cls): + """Accumulate one batch of prediction and groundtruth. + + Args: + batch_pred_map_cls: a list of lists [[(pred_cls, pred_box_params, score),...],...] + batch_gt_map_cls: a list of lists [[(gt_cls, gt_box_params),...],...] + should have the same length with batch_pred_map_cls (batch_size) + """ + + bsize = len(batch_pred_map_cls) + assert (bsize == len(batch_gt_map_cls)) + for i in range(bsize): + self.gt_map_cls[self.scan_cnt] = batch_gt_map_cls[i] + self.pred_map_cls[self.scan_cnt] = batch_pred_map_cls[i] + self.scan_cnt += 1 + + def compute_metrics(self): + """Use accumulated predictions and groundtruths to compute Average + Precision.""" + rec, prec, ap = eval_det_multiprocessing(self.pred_map_cls, + self.gt_map_cls, + ovthresh=self.ap_iou_thresh, + get_iou_func=get_iou_obb) + ret_dict = {} + for key in sorted(ap.keys()): + clsname = self.class2type_map[key] if self.class2type_map else str( + key) + ret_dict['%s Average Precision' % (clsname)] = ap[key] + ret_dict['mAP'] = np.mean(list(ap.values())) + rec_list = [] + for key in sorted(ap.keys()): + clsname = self.class2type_map[key] if self.class2type_map else str( + key) + try: + ret_dict['%s Recall' % (clsname)] = rec[key][-1] + rec_list.append(rec[key][-1]) + except: + ret_dict['%s Recall' % (clsname)] = 0 + rec_list.append(0) + ret_dict['AR'] = np.mean(rec_list) + return ret_dict + + def reset(self): + self.gt_map_cls = {} # {scan_id: [(classname, bbox)]} + self.pred_map_cls = {} # {scan_id: [(classname, bbox, score)]} + self.scan_cnt = 0 diff --git a/models/Scanrefer/lib/config.py b/models/Scanrefer/lib/config.py new file mode 100644 index 0000000..611d766 --- /dev/null +++ b/models/Scanrefer/lib/config.py @@ -0,0 +1,33 @@ +import os +import sys + +from easydict import EasyDict + +CONF = EasyDict() + +ENV_PATH = os.path.abspath(__file__) +# path +CONF.PATH = EasyDict() +CONF.PATH.BASE = os.path.dirname(os.path.dirname(ENV_PATH)) +CONF.PATH.DATA = os.path.join(CONF.PATH.BASE, 'data') +CONF.PATH.SCANNET = os.path.join(CONF.PATH.DATA, 'scannet') +CONF.PATH.LIB = os.path.join(CONF.PATH.BASE, 'lib') +CONF.PATH.MODELS = os.path.join(CONF.PATH.BASE, 'models') +CONF.PATH.UTILS = os.path.join(CONF.PATH.BASE, 'utils') + +# append to syspath +for _, path in CONF.PATH.items(): + sys.path.append(path) + +# scannet data +CONF.PATH.SCANNET_SCANS = os.path.join(CONF.PATH.SCANNET, 'scans') +CONF.PATH.SCANNET_META = os.path.join(CONF.PATH.SCANNET, 'meta_data') +CONF.PATH.SCANNET_DATA = os.path.join(CONF.PATH.SCANNET, 'scannet_data') + +# output +CONF.PATH.OUTPUT = os.path.join(CONF.PATH.BASE, 'scanrefer_1031_0.2_25epoch') + +# train +CONF.TRAIN = EasyDict() +CONF.TRAIN.MAX_DES_LEN = 126 +CONF.TRAIN.SEED = 42 diff --git a/models/Scanrefer/lib/dataset.py b/models/Scanrefer/lib/dataset.py new file mode 100644 index 0000000..5694729 --- /dev/null +++ b/models/Scanrefer/lib/dataset.py @@ -0,0 +1,370 @@ +''' +File Created: Monday, 25th November 2019 1:35:30 pm +Author: Dave Zhenyu Chen (zhenyu.chen@tum.de) +''' + +import json +import multiprocessing as mp +import os +import pickle +import sys +import time + +import h5py +import numpy as np +import torch +from easydict import EasyDict as edict +from torch.utils.data import Dataset +from tqdm import tqdm + +from mmscan import MMScan + 
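# --- Editor's note (not part of the original patch): the dataset below wraps
# the MMScan visual-grounding loader; mirroring the arguments the class passes
# to MMScan in its __init__, a minimal standalone use would look like:
#
#   from mmscan import MMScan
#   loader = MMScan(version='v1', split='train', verbose=True,
#                   task='MMScan-VG', ratio=0.2)
#   sample = loader[0]   # dict with 'scan_id', 'target_id', 'text', 'pcds',
#                        # 'instance_labels', 'bboxes', ... (see parse_dict)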
+sys.path.append(os.path.join(os.getcwd(), 'lib'))  # HACK add the lib folder
+from data.scannet.model_util_scannet import (ScannetDatasetConfig,
+                                             rotate_aligned_boxes,
+                                             rotate_aligned_boxes_along_axis)
+from lib.config import CONF
+from utils.pc_utils import random_sampling, rotx, roty, rotz
+
+# data setting
+DC = ScannetDatasetConfig()
+MAX_NUM_OBJ = 128
+MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8])
+
+# data path
+# SCANNET_V2_TSV = os.path.join(CONF.PATH.SCANNET_META, "scannetv2-labels.combined.tsv")
+# MULTIVIEW_DATA = os.path.join(CONF.PATH.SCANNET_DATA, "enet_feats.hdf5")
+
+# no multi-view
+MULTIVIEW_DATA = ''
+GLOVE_PICKLE = os.path.join(CONF.PATH.DATA, 'glove.p')
+UNK_CLASS_ID = 165  # for "object" in es; the actual value is 166, but es counts from 1.
+
+import nltk
+from nltk.tokenize import word_tokenize
+
+# nltk.download('punkt')
+
+
+def tokenize_text(description):
+    try:
+        tokens = word_tokenize(description)
+    except:
+        # download the punkt package if it is missing
+        nltk.download('punkt')
+        tokens = word_tokenize(description)
+    return tokens
+
+
+from .euler_utils import euler_to_matrix_np
+from .utils_read import (NUM2RAW_3RSCAN, RAW2NUM_3RSCAN, apply_mapping_to_keys,
+                         read_es_infos, to_scene_id)
+
+
+class ScannetReferenceDataset(Dataset):
+
+    def __init__(self,
+                 es_info_file,
+                 vg_raw_data_file,
+                 split='train',
+                 num_points=40000,
+                 use_height=False,
+                 use_color=False,
+                 use_normal=False,
+                 use_multiview=False,
+                 augment=False):
+        # load the embedding
+        with open(GLOVE_PICKLE, 'rb') as f:
+            self.glove_embeding = pickle.load(f)
+        from data.scannet.meta_data.es_type_id import es_type_dict
+        self.raw2label = {k: v - 1 for k, v in es_type_dict.items()}
+        self.split = split
+        self.num_points = num_points
+        self.use_color = use_color
+        self.use_height = use_height
+        self.use_normal = use_normal
+        self.use_multiview = use_multiview
+        self.augment = augment
+        self.debug = False
+
+        # set the ratio to 0.2 for training and validation
+        self.mmscan_loader = MMScan(version='v1', split=split, verbose=True,
+                                    task='MMScan-VG', ratio=0.2)
+
+    def __len__(self):
+        return len(self.mmscan_loader)
+
+    def __getitem__(self, idx):
+
+        return self.parse_dict(self.mmscan_loader[idx])
+
+    def parse_dict(self, data_dict) -> dict:
+
+        start = time.time()
+
+        # preprocessing for the annotation
+        scene_id = data_dict['scan_id']
+        object_id = data_dict['target_id']
+
+        assert len(object_id) > 0
+
+        idx = data_dict['index']
+
+        tokens = tokenize_text(data_dict['text'].lower())
+
+        # get language features:
+        # tokenize the description and look up GloVe embeddings
+
+        embeddings = np.zeros((CONF.TRAIN.MAX_DES_LEN, 300))
+        for token_id in range(CONF.TRAIN.MAX_DES_LEN):
+            if token_id < len(tokens):
+                token = tokens[token_id]
+                if token in self.glove_embeding:
+                    embeddings[token_id] = self.glove_embeding[token]
+                else:
+                    embeddings[token_id] = self.glove_embeding['unk']
+
+        lang_feat = embeddings
+        lang_len = len(tokens)
+        lang_len = lang_len if lang_len <= CONF.TRAIN.MAX_DES_LEN else CONF.TRAIN.MAX_DES_LEN
+
+        # data_dict["pcds"] = self.MMScan_collect["scan"][scan_idx]['pcds']
+        # data_dict["obj_pcds"] = self.MMScan_collect["scan"][scan_idx]['obj_pcds']
+        # data_dict["scene_center"] = self.MMScan_collect["scan"][scan_idx]['scene_center']
+        # data_dict["bboxes"]
+
+        # preprocess point clouds and boxes
+        mesh_vertices = data_dict['pcds']
+        assert mesh_vertices.shape[1] == 6
+        instance_labels = data_dict['instance_labels']
+
+        instance_bboxes = np.zeros((MAX_NUM_OBJ, 8 + 3))
+
+        for index_, obj_id in enumerate(data_dict['bboxes']):
+            if index_ >=
MAX_NUM_OBJ: + break + bbox = data_dict['bboxes'][obj_id]['bbox'] + obj_type = data_dict['bboxes'][obj_id]['type'] + instance_bboxes[index_, :6] = bbox[:6] + instance_bboxes[index_, 6:9] = bbox[6:] + if obj_type in self.raw2label: + instance_bboxes[index_, 9] = self.raw2label[obj_type] + else: + if obj_type == 'steps': + instance_bboxes[index_, 9] = self.raw2label['step'] + instance_bboxes[index_, 10] = obj_id + + # use color&normal, no multiview + + if not self.use_color: + point_cloud = mesh_vertices[:, 0:3] # do not use color for now + pcl_color = mesh_vertices[:, 3:6] + else: + point_cloud = mesh_vertices[:, 0:6] + point_cloud[:, 3:6] = point_cloud[:, 3:6] - MEAN_COLOR_RGB / 256.0 + pcl_color = point_cloud[:, 3:6] + + if self.use_normal: + normals = mesh_vertices[:, 6:9] + point_cloud = np.concatenate([point_cloud, normals], 1) + + if self.use_multiview: + # load multiview database + pid = mp.current_process().pid + if pid not in self.multiview_data: + self.multiview_data[pid] = h5py.File(MULTIVIEW_DATA, + 'r', + libver='latest') + + multiview = self.multiview_data[pid][scene_id] + point_cloud = np.concatenate([point_cloud, multiview], 1) + + if self.use_height: + floor_height = np.percentile(point_cloud[:, 2], 0.99) + height = point_cloud[:, 2] - floor_height + point_cloud = np.concatenate( + [point_cloud, np.expand_dims(height, 1)], 1) + + point_cloud, choices = random_sampling(point_cloud, + self.num_points, + return_choices=True) + instance_labels = instance_labels[choices] + pcl_color = pcl_color[choices] + + # ------------------------------- LABELS ------------------------------ + target_bboxes = np.zeros((MAX_NUM_OBJ, 9)) + target_bboxes_mask = np.zeros((MAX_NUM_OBJ)) + angle_classes = np.zeros((MAX_NUM_OBJ, )) + angle_residuals = np.zeros((MAX_NUM_OBJ, )) + size_classes = np.zeros((MAX_NUM_OBJ, )) + size_residuals = np.zeros((MAX_NUM_OBJ, 3)) + ref_box_label = np.zeros( + MAX_NUM_OBJ) # bbox label for reference target + ref_class_label = np.zeros(DC.num_size_cluster) + + target_bboxes_rot_mat = None # If target bboxes rot mat is not None, then the target bbox euler angle is not trusted + + if self.split != 'test': + num_bbox = instance_bboxes.shape[ + 0] if instance_bboxes.shape[0] < MAX_NUM_OBJ else MAX_NUM_OBJ + target_bboxes_mask[0:num_bbox] = 1 + target_bboxes[0:num_bbox, :] = instance_bboxes[:MAX_NUM_OBJ, 0:9] + + point_votes = np.zeros([self.num_points, 3]) + point_votes_mask = np.zeros(self.num_points) + + # ------------------------------- DATA AUGMENTATION ------------------------------ + if self.augment and not self.debug: + # TODO + + if np.random.random() > 0.5: + # Flipping along the YZ plane + point_cloud[:, 0] = -1 * point_cloud[:, 0] + target_bboxes[:, 0] = -1 * target_bboxes[:, 0] + target_bboxes[:, 6] = -target_bboxes[:, 6] + target_bboxes[:, 8] = -target_bboxes[:, 8] + + if np.random.random() > 0.5: + # Flipping along the XZ plane + point_cloud[:, 1] = -1 * point_cloud[:, 1] + target_bboxes[:, 1] = -1 * target_bboxes[:, 1] + target_bboxes[:, 6] = -target_bboxes[:, 6] + target_bboxes[:, 7] = -target_bboxes[:, 7] + + # Rotation along up-axis/Z-axis + target_bboxes_rot_mat = euler_to_matrix_np(target_bboxes[:, + 6:9]) + rot_angle = (np.random.random() * np.pi / + 18) - np.pi / 36 # -5 ~ +5 degree + rot_mat = rotz(rot_angle) + point_cloud[:, 0:3] = np.dot(point_cloud[:, 0:3], + np.transpose(rot_mat)) + # target_bboxes = rotate_aligned_boxes_along_axis(target_bboxes, rot_mat, "z") + target_bboxes[:, 0:3] = np.dot(target_bboxes[:, 0:3], + np.transpose(rot_mat)) + 
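# --- Editor's note: a standalone check (not part of the original patch) of
# the rotation-matrix composition on the next line: applying the global
# Z-rotation to a box's own orientation is a left multiplication, and for
# pure yaw rotations the angles simply add.
import numpy as np
def _rotz(t):                                        # local to this sketch
    c, s = np.cos(t), np.sin(t)
    return np.array([[c, -s, 0.0], [s, c, 0.0], [0.0, 0.0, 1.0]])
a, g = 0.3, 0.1                                      # box yaw, global rotation
assert np.allclose(_rotz(g) @ _rotz(a), _rotz(g + a))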
target_bboxes_rot_mat = rot_mat @ target_bboxes_rot_mat + + # Scale + scale_factor = np.random.uniform(0.9, 1.1) + target_bboxes[:, 0:6] *= scale_factor + point_cloud[:, 0:3] *= scale_factor + + # Translation + trans_factor = np.random.normal(scale=np.array( + [.1, .1, .1], dtype=np.float32), + size=3).T + point_cloud[:, 0:3] += trans_factor + target_bboxes[:, 0:3] += trans_factor + + # compute votes *AFTER* augmentation + # generate votes + # Note: since there's no map between bbox instance labels and + # pc instance_labels (it had been filtered + # in the data preparation step) we'll compute the instance bbox + # from the points sharing the same instance label. + for i_instance in np.unique(instance_labels): + # find all points belong to that instance + ind = np.where(instance_labels == i_instance)[0] + if len(ind) < 10: + continue + + x = point_cloud[ind, :3] + center = 0.5 * (x.min(0) + x.max(0)) + point_votes[ind, :] = center - x + point_votes_mask[ind] = 1.0 + point_votes = np.tile(point_votes, + (1, 3)) # make 3 votes identical + + class_ind = [ + DC.nyu40id2class[int(x)] + for x in instance_bboxes[:num_bbox, -2] + ] + # NOTE: set size class as semantic class. Consider use size2class. + size_classes[0:num_bbox] = class_ind + size_residuals[0:num_bbox, :] = target_bboxes[ + 0:num_bbox, 3:6] - DC.mean_size_arr[class_ind, :] + + # construct the reference target label for each bbox + if target_bboxes_rot_mat is None: + target_bboxes_rot_mat = euler_to_matrix_np(target_bboxes[:, + 6:9]) + for i, gt_id in enumerate(instance_bboxes[:num_bbox, -1]): + if gt_id in object_id: + ref_box_label[i] = 1 + ref_class_label[DC.nyu40id2class[int( + instance_bboxes[i, -2])]] = 1 + else: + num_bbox = 1 + point_votes = np.zeros([self.num_points, + 9]) # make 3 votes identical + point_votes_mask = np.zeros(self.num_points) + + target_bboxes_semcls = np.zeros((MAX_NUM_OBJ)) + try: + target_bboxes_semcls[0:num_bbox] = [ + DC.nyu40id2class[int(x)] + for x in instance_bboxes[:, -2][0:num_bbox] + ] + except KeyError: + pass + + if target_bboxes_rot_mat is None: + target_bboxes_rot_mat = euler_to_matrix_np(target_bboxes[:, 6:9]) + + parse_data_dict = {} + parse_data_dict['point_clouds'] = point_cloud.astype( + np.float32) # point cloud data including features + parse_data_dict['lang_feat'] = lang_feat.astype( + np.float32) # language feature vectors + parse_data_dict['lang_len'] = np.array(lang_len).astype( + np.int64) # length of each description + parse_data_dict['center_label'] = target_bboxes.astype( + np.float32)[:, 0:3] # (MAX_NUM_OBJ, 3) for GT box center XYZ + parse_data_dict['heading_class_label'] = angle_classes.astype( + np.int64 + ) # (MAX_NUM_OBJ,) with int values in 0,...,NUM_HEADING_BIN-1 + parse_data_dict['heading_residual_label'] = angle_residuals.astype( + np.float32) # (MAX_NUM_OBJ,) + parse_data_dict['target_bbox'] = target_bboxes.astype(np.float32) + parse_data_dict['target_rot_mat'] = target_bboxes_rot_mat.astype( + np.float32) + parse_data_dict['size_class_label'] = size_classes.astype( + np.int64 + ) # (MAX_NUM_OBJ,) with int values in 0,...,NUM_SIZE_CLUSTER + parse_data_dict['size_residual_label'] = size_residuals.astype( + np.float32) # (MAX_NUM_OBJ, 3) + parse_data_dict['num_bbox'] = np.array(num_bbox).astype(np.int64) + parse_data_dict['sem_cls_label'] = target_bboxes_semcls.astype( + np.int64) # (MAX_NUM_OBJ,) semantic class index + parse_data_dict['box_label_mask'] = target_bboxes_mask.astype( + np.float32) # (MAX_NUM_OBJ) as 0/1 with 1 indicating a unique box + 
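# --- Editor's note: a standalone check (not part of the original patch) of
# the vote targets stored in 'vote_label' just below: for every labeled point,
# point + vote recovers its instance box center, and each vote is tiled three
# times to match the three-identical-votes format used here.
import numpy as np
pts = np.array([[0.0, 0.0, 0.0], [2.0, 0.0, 0.0], [2.0, 2.0, 2.0]])
center = 0.5 * (pts.min(0) + pts.max(0))             # (1, 1, 1)
votes = np.tile(center - pts, (1, 3))                # (N, 9): 3 identical votes
assert np.allclose(pts + votes[:, :3], center)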
parse_data_dict['vote_label'] = point_votes.astype(np.float32) + parse_data_dict['vote_label_mask'] = point_votes_mask.astype(np.int64) + parse_data_dict['scan_idx'] = np.array(idx).astype(np.int64) + parse_data_dict['pcl_color'] = pcl_color + parse_data_dict['ref_box_label'] = ref_box_label.astype( + bool) # 0/1 reference labels for each object bbox + parse_data_dict['ref_class_label'] = ref_class_label.astype(np.float32) + parse_data_dict['pcl_color'] = pcl_color + parse_data_dict['sub_class'] = data_dict['sub_class'] + parse_data_dict['sample_ID'] = data_dict['ID'] + parse_data_dict['load_time'] = time.time() - start + + return parse_data_dict + + def _translate(self, point_set, bbox): + # unpack + coords = point_set[:, :3] + + # translation factors + x_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0] + y_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0] + z_factor = np.random.choice(np.arange(-0.5, 0.501, 0.001), size=1)[0] + factor = [x_factor, y_factor, z_factor] + + # dump + coords += factor + point_set[:, :3] = coords + bbox[:, :3] += factor + + return point_set, bbox diff --git a/models/Scanrefer/lib/enet.py b/models/Scanrefer/lib/enet.py new file mode 100644 index 0000000..fecc346 --- /dev/null +++ b/models/Scanrefer/lib/enet.py @@ -0,0 +1,894 @@ +from functools import reduce + +import torch +import torch._utils +import torch.nn as nn +from torch.autograd import Variable + +# compatible with PyTorch 0.4.0 +try: + torch._utils._rebuild_tensor_v2 +except AttributeError: + + def _rebuild_tensor_v2(storage, storage_offset, size, stride, + requires_grad, backward_hooks): + tensor = torch._utils._rebuild_tensor(storage, storage_offset, size, + stride) + tensor.requires_grad = requires_grad + tensor._backward_hooks = backward_hooks + return tensor + + torch._utils._rebuild_tensor_v2 = _rebuild_tensor_v2 + + +class LambdaBase(nn.Sequential): + + def __init__(self, fn, *args): + super(LambdaBase, self).__init__(*args) + self.lambda_func = fn + + def forward_prepare(self, input): + output = [] + for module in self._modules.values(): + output.append(module(input)) + return output if output else input + + +class Lambda(LambdaBase): + + def forward(self, input): + return self.lambda_func(self.forward_prepare(input)) + + +class LambdaMap(LambdaBase): + + def forward(self, input): + # result is Variables list [Variable1, Variable2, ...] + return list(map(self.lambda_func, self.forward_prepare(input))) + + +class LambdaReduce(LambdaBase): + + def forward(self, input): + # result is a Variable + return reduce(self.lambda_func, self.forward_prepare(input)) + + +class Padding(nn.Module): + # pad puts in [pad] amount of [value] over dimension [dim], starting at + # index [index] in that dimension. If pad<0, index counts from the left. + # If pad>0 index counts from the right. + # When nInputDim is provided, inputs larger than that value will be considered batches + # where the actual dim to be padded will be dimension dim + 1. 
+ def __init__(self, dim, pad, value, index, nInputDim): + super(Padding, self).__init__() + self.value = value + # self.index = index + self.dim = dim + self.pad = pad + self.nInputDim = nInputDim + if index != 0: + raise NotImplementedError('Padding: index != 0 not implemented') + + def forward(self, input): + dim = self.dim + if self.nInputDim != 0: + dim += input.dim() - self.nInputDim + pad_size = list(input.size()) + pad_size[dim] = self.pad + padder = Variable(input.data.new(*pad_size).fill_(self.value)) + + if self.pad < 0: + padded = torch.cat((padder, input), dim) + else: + padded = torch.cat((input, padder), dim) + return padded + + +class Dropout(nn.Dropout): + """Cancel out PyTorch rescaling by 1/(1-p)""" + + def forward(self, input): + input = input * (1 - self.p) + return super(Dropout, self).forward(input) + + +class Dropout2d(nn.Dropout2d): + """Cancel out PyTorch rescaling by 1/(1-p)""" + + def forward(self, input): + input = input * (1 - self.p) + return super(Dropout2d, self).forward(input) + + +class StatefulMaxPool2d(nn.MaxPool2d): # object keeps indices and input sizes + + def __init__(self, *args, **kwargs): + super(StatefulMaxPool2d, self).__init__(*args, **kwargs) + self.indices = None + self.input_size = None + + def forward(self, x): + return_indices, self.return_indices = self.return_indices, True + output, indices = super(StatefulMaxPool2d, self).forward(x) + self.return_indices = return_indices + self.indices = indices + self.input_size = x.size() + if return_indices: + return output, indices + return output + + +class StatefulMaxUnpool2d(nn.Module): + + def __init__(self, pooling): + super(StatefulMaxUnpool2d, self).__init__() + self.pooling = pooling + self.unpooling = nn.MaxUnpool2d(pooling.kernel_size, pooling.stride, + pooling.padding) + + def forward(self, x): + return self.unpooling.forward(x, self.pooling.indices, + self.pooling.input_size) + + +pooling_0 = StatefulMaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=False) +pooling_1 = StatefulMaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=False) +pooling_2 = StatefulMaxPool2d((2, 2), (2, 2), (0, 0), ceil_mode=False) + + +def create_enet(num_classes): + enet = nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Conv2d(3, 13, (3, 3), (2, 2), (1, 1), (1, 1), 1), + pooling_0, + ), + LambdaReduce(lambda x, y: torch.cat((x, y), 1)), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(16, + 16, (2, 2), (2, 2), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, 16, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, + 64, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(64, 0.001, 0.1, True), + Dropout2d(0.01), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + pooling_1, + Padding(0, 48, 0, 0, 3), + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(64), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(64, + 16, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, 16, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, + 64, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(64, 0.001, 0.1, 
True), + Dropout2d(0.01), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(64), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(64, + 16, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, 16, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, + 64, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(64, 0.001, 0.1, True), + Dropout2d(0.01), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(64), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(64, + 16, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, 16, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, + 64, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(64, 0.001, 0.1, True), + Dropout2d(0.01), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(64), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(64, + 16, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, 16, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(16, 0.001, 0.1, True), + nn.PReLU(16), + nn.Conv2d(16, + 64, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(64, 0.001, 0.1, True), + Dropout2d(0.01), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(64), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(64, + 32, (2, 2), (2, 2), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + pooling_2, + Padding(0, 64, 0, 0, 3), + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + 
bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (2, 2), (2, 2), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 32, (1, 5), (1, 1), (0, 2), (1, 1), + 1, + bias=False), + nn.Conv2d(32, 32, (5, 1), (1, 1), (2, 0), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (4, 4), (4, 4), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (8, 8), (8, 8), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 32, (1, 5), (1, 1), (0, 2), (1, 1), + 1, + bias=False), + nn.Conv2d(32, 32, (5, 1), (1, 1), (2, 0), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), 
+ 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (16, 16), (16, 16), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (2, 2), (2, 2), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 32, (1, 5), (1, 1), (0, 2), (1, 1), + 1, + bias=False), + nn.Conv2d(32, 32, (5, 1), (1, 1), (2, 0), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (4, 4), (4, 4), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + 
nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (1, 1), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (8, 8), (8, 8), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 32, (1, 5), (1, 1), (0, 2), (1, 1), + 1, + bias=False), + nn.Conv2d(32, 32, (5, 1), (1, 1), (2, 0), (1, 1), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + nn.Sequential( # Sequential, + LambdaMap( + lambda x: x, # ConcatTable, + nn.Sequential( # Sequential, + nn.Conv2d(128, + 32, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, 32, (3, 3), (1, 1), (16, 16), (16, 16), 1), + nn.BatchNorm2d(32, 0.001, 0.1, True), + nn.PReLU(32), + nn.Conv2d(32, + 128, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False), + nn.BatchNorm2d(128, 0.001, 0.1, True), + Dropout2d(0.1), + ), + nn.Sequential( # Sequential, + Lambda(lambda x: x), # Identity, + ), + ), + LambdaReduce(lambda x, y: x + y), # CAddTable, + nn.PReLU(128), + ), + # ENCODER END (add classifier) + nn.Sequential( + nn.Conv2d(128, + num_classes, (1, 1), (1, 1), (0, 0), (1, 1), + 1, + bias=False)) + #nn.Sequential( # Sequential, + # LambdaMap(lambda x: x, # ConcatTable, + # nn.Sequential( # Sequential, + # nn.Conv2d(128, 16, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # nn.PReLU(16), + # nn.ConvTranspose2d(16, 16, (3, 3), (2, 2), (1, 1), (1, 1)), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # nn.PReLU(16), + # nn.Conv2d(16, 64, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(64, 0.001, 0.1, True), + # ), + # nn.Sequential( # Sequential, + # Lambda(lambda x: x), # Identity, + # nn.Conv2d(128, 64, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(64, 0.001, 0.1, True), + # StatefulMaxUnpool2d(pooling_2), #SpatialMaxUnpooling, + # ), + # ), + # LambdaReduce(lambda x,y: x+y), # CAddTable, + # nn.PReLU(64), + #), + #nn.Sequential( # Sequential, + # LambdaMap(lambda x: x, # ConcatTable, 
+ # nn.Sequential( # Sequential, + # nn.Conv2d(64, 16, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # nn.PReLU(16), + # nn.Conv2d(16, 16, (3, 3), (1, 1), (1, 1), (1, 1), 1), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # nn.PReLU(16), + # nn.Conv2d(16, 64, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(64, 0.001, 0.1, True), + # ), + # nn.Sequential( # Sequential, + # Lambda(lambda x: x), # Identity, + # ), + # ), + # LambdaReduce(lambda x,y: x+y), # CAddTable, + # nn.PReLU(64), + #), + #nn.Sequential( # Sequential, + # LambdaMap(lambda x: x, # ConcatTable, + # nn.Sequential( # Sequential, + # nn.Conv2d(64, 16, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # nn.PReLU(16), + # nn.Conv2d(16, 16, (3, 3), (1, 1), (1, 1), (1, 1), 1), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # nn.PReLU(16), + # nn.Conv2d(16, 64, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(64, 0.001, 0.1, True), + # ), + # nn.Sequential( # Sequential, + # Lambda(lambda x: x), # Identity, + # ), + # ), + # LambdaReduce(lambda x,y: x+y), # CAddTable, + # nn.PReLU(64), + #), + #nn.Sequential( # Sequential, + # LambdaMap(lambda x: x, # ConcatTable, + # nn.Sequential( # Sequential, + # nn.Conv2d(64, 4, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(4, 0.001, 0.1, True), + # nn.PReLU(4), + # nn.ConvTranspose2d(4, 4, (3, 3), (2, 2), (1, 1), (1, 1)), + # nn.BatchNorm2d(4, 0.001, 0.1, True), + # nn.PReLU(4), + # nn.Conv2d(4, 16, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # ), + # nn.Sequential( # Sequential, + # Lambda(lambda x: x), # Identity, + # nn.Conv2d(64, 16, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # StatefulMaxUnpool2d(pooling_1), #SpatialMaxUnpooling, + # ), + # ), + # LambdaReduce(lambda x,y: x+y), # CAddTable, + # nn.PReLU(16), + #), + #nn.Sequential( # Sequential, + # LambdaMap(lambda x: x, # ConcatTable, + # nn.Sequential( # Sequential, + # nn.Conv2d(16, 4, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(4, 0.001, 0.1, True), + # nn.PReLU(4), + # nn.Conv2d(4, 4, (3, 3), (1, 1), (1, 1), (1, 1), 1), + # nn.BatchNorm2d(4, 0.001, 0.1, True), + # nn.PReLU(4), + # nn.Conv2d(4, 16, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False), + # nn.BatchNorm2d(16, 0.001, 0.1, True), + # ), + # nn.Sequential( # Sequential, + # Lambda(lambda x: x), # Identity, + # ), + # ), + # LambdaReduce(lambda x,y: x+y), # CAddTable, + # nn.PReLU(16), + #), + #nn.ConvTranspose2d(16, num_classes, (2, 2), (2, 2), (0, 0), (0, 0)), + ) + return enet + + +def create_enet_for_3d(num_2d_classes, model_path, num_3d_classes): + model = create_enet(num_2d_classes) + model.load_state_dict(torch.load(model_path)) + # remove the classifier + n = len(model) + model_trainable = nn.Sequential(*(model[i] for i in range(n - 9, n - 1))) + model_fixed = nn.Sequential(*(model[i] for i in range(n - 9))) + #model_classifier = nn.Sequential(nn.Conv2d(128, num_3d_classes, (1, 1), (1, 1), (0, 0), (1, 1), 1, bias=False)) + model_classifier = nn.Sequential(model[n - 1]) + #print 'model_fixed' + #print model_fixed + #print 'model_trainable' + #print model_trainable + #print 'model_classifier' + #print model_classifier + #raw_input('sdflkj') + for param in model_fixed.parameters(): + param.requires_grad = False + return model_fixed, model_trainable, model_classifier diff --git a/models/Scanrefer/lib/euler_utils.py 
b/models/Scanrefer/lib/euler_utils.py new file mode 100644 index 0000000..ace6e1a --- /dev/null +++ b/models/Scanrefer/lib/euler_utils.py @@ -0,0 +1,345 @@ +from typing import Tuple, Union + +import torch +from torch.nn.functional import l1_loss, mse_loss, smooth_l1_loss + +try: + from pytorch3d.ops import box3d_overlap +except ImportError: + print('warning: failed to import pytorch3d') + box3d_overlap = None + +if box3d_overlap is not None: + from pytorch3d.transforms import (euler_angles_to_matrix, + matrix_to_euler_angles) +else: + + def _axis_angle_rotation(axis: str, angle: torch.Tensor) -> torch.Tensor: + """Return the rotation matrices for one of the rotations about an axis + of which Euler angles describe, for each value of the angle given. + + Args: + axis: Axis label "X" or "Y or "Z". + angle: any shape tensor of Euler angles in radians + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + + cos = torch.cos(angle) + sin = torch.sin(angle) + one = torch.ones_like(angle) + zero = torch.zeros_like(angle) + + if axis == 'X': + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + elif axis == 'Y': + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + elif axis == 'Z': + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + else: + raise ValueError('letter must be either X, Y or Z.') + + return torch.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + + def euler_angles_to_matrix(euler_angles: torch.Tensor, + convention: str) -> torch.Tensor: + """Convert rotations given as Euler angles in radians to rotation + matrices. + + Args: + euler_angles: Euler angles in radians as tensor of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", and "Z"}. + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + if euler_angles.dim() == 0 or euler_angles.shape[-1] != 3: + raise ValueError('Invalid input euler angles.') + if len(convention) != 3: + raise ValueError('Convention must have 3 letters.') + if convention[1] in (convention[0], convention[2]): + raise ValueError(f'Invalid convention {convention}.') + for letter in convention: + if letter not in ('X', 'Y', 'Z'): + raise ValueError( + f'Invalid letter {letter} in convention string.') + matrices = [ + _axis_angle_rotation(c, e) + for c, e in zip(convention, torch.unbind(euler_angles, -1)) + ] + # return functools.reduce(torch.matmul, matrices) + return torch.matmul(torch.matmul(matrices[0], matrices[1]), + matrices[2]) + + +def euler_to_matrix_np(euler): + # euler: N*3 np array + euler_tensor = torch.tensor(euler) + matrix_tensor = euler_angles_to_matrix(euler_tensor, 'ZXY') + return matrix_tensor.numpy() + + +def bbox_to_corners(centers, sizes, rot_mat: torch.Tensor) -> torch.Tensor: + """Transform bbox parameters to the 8 corners. + + Args: + bbox (Tensor): 3D box of shape (N, 6) or (N, 7) or (N, 9). + + Returns: + Tensor: Transformed 3D box of shape (N, 8, 3). 
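+
+    Note (added): the Args entry above is legacy text; this helper actually
+    takes ``centers`` (N, 3), ``sizes`` (N, 3) and ``rot_mat`` (N, 3, 3),
+    each optionally carrying a leading (batch, n_proposals) shape that is
+    flattened internally and restored on return.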
+ """ + device = centers.device + use_batch = False + if len(centers.shape) == 3: + use_batch = True + batch_size, n_proposals = centers.shape[0], centers.shape[1] + centers = centers.reshape(-1, 3) + sizes = sizes.reshape(-1, 3) + rot_mat = rot_mat.reshape(-1, 3, 3) + + n_box = centers.shape[0] + if use_batch: + assert n_box == batch_size * n_proposals + centers = centers.unsqueeze(1).repeat(1, 8, 1) # shape (N, 8, 3) + half_sizes = sizes.unsqueeze(1).repeat(1, 8, 1) / 2 # shape (N, 8, 3) + eight_corners_x = torch.tensor([1, 1, 1, 1, -1, -1, -1, -1], + device=device).unsqueeze(0).repeat( + n_box, 1) # shape (N, 8) + eight_corners_y = torch.tensor([1, 1, -1, -1, 1, 1, -1, -1], + device=device).unsqueeze(0).repeat( + n_box, 1) # shape (N, 8) + eight_corners_z = torch.tensor([1, -1, -1, 1, 1, -1, -1, 1], + device=device).unsqueeze(0).repeat( + n_box, 1) # shape (N, 8) + eight_corners = torch.stack( + (eight_corners_x, eight_corners_y, eight_corners_z), + dim=-1) # shape (N, 8, 3) + eight_corners = eight_corners * half_sizes # shape (N, 8, 3) + # rot_mat: (N, 3, 3), eight_corners: (N, 8, 3) + rotated_corners = torch.matmul(eight_corners, + rot_mat.transpose(1, 2)) # shape (N, 8, 3) + res = centers + rotated_corners + if use_batch: + res = res.reshape(batch_size, n_proposals, 8, 3) + return res + + +def chamfer_distance( + src: torch.Tensor, + dst: torch.Tensor, + src_weight: Union[torch.Tensor, float] = 1.0, + dst_weight: Union[torch.Tensor, float] = 1.0, + criterion_mode: str = 'l2', + reduction: str = 'mean' +) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]: + """Calculate Chamfer Distance of two sets. + + Args: + src (Tensor): Source set with shape [B, N, C] to + calculate Chamfer Distance. + dst (Tensor): Destination set with shape [B, M, C] to + calculate Chamfer Distance. + src_weight (Tensor or float): Weight of source loss. Defaults to 1.0. + dst_weight (Tensor or float): Weight of destination loss. + Defaults to 1.0. + criterion_mode (str): Criterion mode to calculate distance. + The valid modes are 'smooth_l1', 'l1' or 'l2'. Defaults to 'l2'. + reduction (str): Method to reduce losses. + The valid reduction method are 'none', 'sum' or 'mean'. + Defaults to 'mean'. + + Returns: + tuple: Source and Destination loss with the corresponding indices. + + - loss_src (Tensor): The min distance + from source to destination. + - loss_dst (Tensor): The min distance + from destination to source. + - indices1 (Tensor): Index the min distance point + for each point in source to destination. + - indices2 (Tensor): Index the min distance point + for each point in destination to source. 
+ """ + if len(src.shape) == 4: + src = src.reshape(-1, 8, 3) + if len(dst.shape) == 4: + dst = dst.reshape(-1, 8, 3) + + if criterion_mode == 'smooth_l1': + criterion = smooth_l1_loss + elif criterion_mode == 'l1': + criterion = l1_loss + elif criterion_mode == 'l2': + criterion = mse_loss + else: + raise NotImplementedError + + src_expand = src.unsqueeze(2).repeat(1, 1, dst.shape[1], 1) + dst_expand = dst.unsqueeze(1).repeat(1, src.shape[1], 1, 1) + + distance = criterion(src_expand, dst_expand, reduction='none').sum(-1) + src2dst_distance, indices1 = torch.min(distance, dim=2) # (B,N) + dst2src_distance, indices2 = torch.min(distance, dim=1) # (B,M) + + loss_src = (src2dst_distance * src_weight) + loss_dst = (dst2src_distance * dst_weight) + + if reduction == 'sum': + loss_src = torch.sum(loss_src) + loss_dst = torch.sum(loss_dst) + elif reduction == 'mean': + loss_src = torch.mean(loss_src) + loss_dst = torch.mean(loss_dst) + elif reduction == 'none': + pass + else: + raise NotImplementedError + + return loss_src, loss_dst, indices1, indices2 + + +def axis_aligned_bbox_overlaps_3d(bboxes1, + bboxes2, + mode='iou', + is_aligned=False, + eps=1e-6): + """Calculate overlap between two set of axis aligned 3D bboxes. If + ``is_aligned`` is ``False``, then calculate the overlaps between each bbox + of bboxes1 and bboxes2, otherwise the overlaps between each aligned pair of + bboxes1 and bboxes2. + + Args: + bboxes1 (Tensor): shape (B, m, 6) in + format or empty. + bboxes2 (Tensor): shape (B, n, 6) in + format or empty. + B indicates the batch dim, in shape (B1, B2, ..., Bn). + If ``is_aligned`` is ``True``, then m and n must be equal. + mode (str): "iou" (intersection over union) or "giou" (generalized + intersection over union). + is_aligned (bool, optional): If True, then m and n must be equal. + Defaults to False. + eps (float, optional): A value added to the denominator for numerical + stability. Defaults to 1e-6. + + Returns: + Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,) + + Example: + >>> bboxes1 = torch.FloatTensor([ + >>> [0, 0, 0, 10, 10, 10], + >>> [10, 10, 10, 20, 20, 20], + >>> [32, 32, 32, 38, 40, 42], + >>> ]) + >>> bboxes2 = torch.FloatTensor([ + >>> [0, 0, 0, 10, 20, 20], + >>> [0, 10, 10, 10, 19, 20], + >>> [10, 10, 10, 20, 20, 20], + >>> ]) + >>> overlaps = axis_aligned_bbox_overlaps_3d(bboxes1, bboxes2) + >>> assert overlaps.shape == (3, 3) + >>> overlaps = bbox_overlaps(bboxes1, bboxes2, is_aligned=True) + >>> assert overlaps.shape == (3, ) + Example: + >>> empty = torch.empty(0, 6) + >>> nonempty = torch.FloatTensor([[0, 0, 0, 10, 9, 10]]) + >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1) + >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0) + >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0) + """ + + assert mode in ['iou', 'giou'], f'Unsupported mode {mode}' + # Either the boxes are empty or the length of boxes's last dimension is 6 + assert (bboxes1.size(-1) == 6 or bboxes1.size(0) == 0) + assert (bboxes2.size(-1) == 6 or bboxes2.size(0) == 0) + + # Batch dim must be the same + # Batch dim: (B1, B2, ... 
Bn) + assert bboxes1.shape[:-2] == bboxes2.shape[:-2] + batch_shape = bboxes1.shape[:-2] + + rows = bboxes1.size(-2) + cols = bboxes2.size(-2) + if is_aligned: + assert rows == cols + + if rows * cols == 0: + if is_aligned: + return bboxes1.new(batch_shape + (rows, )) + else: + return bboxes1.new(batch_shape + (rows, cols)) + + area1 = (bboxes1[..., 3] - bboxes1[..., 0]) * ( + bboxes1[..., 4] - bboxes1[..., 1]) * (bboxes1[..., 5] - + bboxes1[..., 2]) + area2 = (bboxes2[..., 3] - bboxes2[..., 0]) * ( + bboxes2[..., 4] - bboxes2[..., 1]) * (bboxes2[..., 5] - + bboxes2[..., 2]) + + if is_aligned: + lt = torch.max(bboxes1[..., :3], bboxes2[..., :3]) # [B, rows, 3] + rb = torch.min(bboxes1[..., 3:], bboxes2[..., 3:]) # [B, rows, 3] + + wh = (rb - lt).clamp(min=0) # [B, rows, 2] + overlap = wh[..., 0] * wh[..., 1] * wh[..., 2] + + if mode in ['iou', 'giou']: + union = area1 + area2 - overlap + else: + union = area1 + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :3], bboxes2[..., :3]) + enclosed_rb = torch.max(bboxes1[..., 3:], bboxes2[..., 3:]) + else: + lt = torch.max(bboxes1[..., :, None, :3], + bboxes2[..., None, :, :3]) # [B, rows, cols, 3] + rb = torch.min(bboxes1[..., :, None, 3:], + bboxes2[..., None, :, 3:]) # [B, rows, cols, 3] + + wh = (rb - lt).clamp(min=0) # [B, rows, cols, 3] + overlap = wh[..., 0] * wh[..., 1] * wh[..., 2] + + if mode in ['iou', 'giou']: + union = area1[..., None] + area2[..., None, :] - overlap + if mode == 'giou': + enclosed_lt = torch.min(bboxes1[..., :, None, :3], + bboxes2[..., None, :, :3]) + enclosed_rb = torch.max(bboxes1[..., :, None, 3:], + bboxes2[..., None, :, 3:]) + + eps = union.new_tensor([eps]) + union = torch.max(union, eps) + ious = overlap / union + if mode in ['iou']: + return ious + # calculate gious + enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0) + enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1] * enclose_wh[..., 2] + enclose_area = torch.max(enclose_area, eps) + gious = ious - (enclose_area - union) / enclose_area + return gious + + +def euler_iou3d(boxes1, boxes2): + rows = boxes1.shape[0] + cols = boxes2.shape[0] + if rows * cols == 0: + return boxes1.new(rows, cols) + + _, iou3d = box3d_overlap(boxes1, boxes2) + return iou3d + + +def euler_iou3d_split(center1, size1, rot1, center2, size2, rot2): + device = center1.device + center1 = center1.cuda() + size1 = size1.cuda() + rot1 = rot1.cuda() + center2 = center2.cuda() + size2 = size2.cuda() + rot2 = rot2.cuda() + corners1 = bbox_to_corners(center1, size1, rot1) + corners2 = bbox_to_corners(center2, size2, rot2) + return euler_iou3d(corners1, corners2).to(device) diff --git a/models/Scanrefer/lib/eval_helper.py b/models/Scanrefer/lib/eval_helper.py new file mode 100644 index 0000000..f92325c --- /dev/null +++ b/models/Scanrefer/lib/eval_helper.py @@ -0,0 +1,195 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+ +import os +import sys + +import numpy as np +import torch +import torch.nn as nn + +sys.path.append(os.path.join(os.getcwd(), 'lib')) # HACK add the lib folder +from copy import deepcopy + +from grounding_metric import ground_eval +from lib.ap_helper import parse_predictions +from lib.euler_utils import bbox_to_corners +from lib.loss import SoftmaxRankingLoss +from utils.box_util import box3d_iou, get_3d_box, get_3d_box_batch +from utils.nn_distance import huber_loss, nn_distance + +from mmscan import VisualGroundingEvaluator + +# here we use the MMScan evaluator for visual grounding tasks. + + +def eval_ref_one_sample(pred_bbox, gt_bbox): + """Evaluate one reference prediction. + + Args: + pred_bbox: 8 corners of prediction bounding box, (8, 3) + gt_bbox: 8 corners of ground truth bounding box, (8, 3) + Returns: + iou: intersection over union score + """ + + iou = box3d_iou(pred_bbox, gt_bbox) + + return iou + + +def construct_bbox_corners(center, box_size): + sx, sy, sz = box_size + x_corners = [ + sx / 2, sx / 2, -sx / 2, -sx / 2, sx / 2, sx / 2, -sx / 2, -sx / 2 + ] + y_corners = [ + sy / 2, -sy / 2, -sy / 2, sy / 2, sy / 2, -sy / 2, -sy / 2, sy / 2 + ] + z_corners = [ + sz / 2, sz / 2, sz / 2, sz / 2, -sz / 2, -sz / 2, -sz / 2, -sz / 2 + ] + corners_3d = np.vstack([x_corners, y_corners, z_corners]) + corners_3d[0, :] = corners_3d[0, :] + center[0] + corners_3d[1, :] = corners_3d[1, :] + center[1] + corners_3d[2, :] = corners_3d[2, :] + center[2] + corners_3d = np.transpose(corners_3d) + + return corners_3d + + +def inference(data_dict, + config, + use_lang_classifier=False, + use_oracle=False, + use_cat_rand=False, + use_best=False, + post_processing=None): + """Loss functions. + + Args: + data_dict: dict + config: dataset config instance + reference: flag (False/True) + post_processing: config dict + Returns: + loss: pytorch scalar tensor + data_dict: dict + """ + + pred_res = [] + gt_res = [] + batch_size, num_words, _ = data_dict['lang_feat'].shape + + objectness_preds_batch = torch.argmax(data_dict['objectness_scores'], + 2).long().detach() + + if post_processing: + _ = parse_predictions(data_dict, post_processing) + nms_masks = torch.LongTensor(data_dict['pred_mask']).cuda() + + # construct valid mask + pred_masks = (nms_masks * objectness_preds_batch == 1).bool().detach() + else: + # construct valid mask + pred_masks = (objectness_preds_batch == 1).bool().detach() + + pred_center = data_dict['center'].detach().cpu() + pred_rot_mat = data_dict['rot_mat'].detach().cpu() + pred_size_class = torch.argmax(data_dict['size_scores'], + -1) # B,num_proposal + pred_size = torch.gather( + data_dict['size_calc'], 2, + pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat( + 1, 1, 1, 3)) # B,num_proposal,1,3 + pred_size = pred_size.squeeze(2) + pred_size = pred_size.detach().cpu() + pred_score = data_dict['cluster_ref'].detach().cpu() + gt_bbox = data_dict['target_bbox'].cpu() + gt_rot_mats = data_dict['target_rot_mat'].cpu() + gt_ref = data_dict['ref_box_label'].cpu() + + for i in range(batch_size): + mask = pred_masks[i] + pred_center_single = pred_center[i][mask] + pred_rot_mat_single = pred_rot_mat[i][mask] + pred_size_single = pred_size[i][mask] + pred_score_single = pred_score[i][mask] + pred_res.append({ + 'center': pred_center_single, + 'size': pred_size_single, + 'rot': pred_rot_mat_single, + 'score': pred_score_single + }) + + # TODO check if it only work for single gt box setting + # rbler: no problem? 
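+        # Note (added for clarity): ref_box_label is cast to bool upstream,
+        # so gt_ref[i] acts as a boolean mask and the indexing below gathers
+        # every referenced GT box; multiple GT boxes per sample therefore
+        # appear to be handled, addressing the TODO above.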
+ gt_center = gt_bbox[i, gt_ref[i]][:, :3] + gt_size = gt_bbox[i, gt_ref[i]][:, 3:6] + gt_rot_mat = gt_rot_mats[i, gt_ref[i]] + + # print("gt_center shape:", gt_center.shape) + gt_res.append({ + 'center': gt_center.cpu(), + 'size': gt_size.cpu(), + 'rot': gt_rot_mat.cpu(), + 'sub_class': data_dict['sub_class'][i], + 'ID': data_dict['sample_ID'][i] + }) + + return pred_res, gt_res + + +def get_eval(pred_list, gt_list, logger): + + return ground_eval(gt_list, pred_list, logger) + #return get_eval_new(pred_list,gt_list, logger) + + +def to_mmscan_form(raw_batch_result): + + batch_result = {} + batch_result['pred_scores'] = raw_batch_result['pred_list']['score'] + batch_result['pred_bboxes'] = raw_batch_result['pred_list'] + batch_result['gt_bboxes'] = raw_batch_result['gt_list'] + batch_result['index'] = raw_batch_result['index'] + batch_result['subclass'] = raw_batch_result['gt_list']['sub_class'] + batch_result['ID'] = raw_batch_result['gt_list']['ID'] + return batch_result + + +def get_eval_new(pred_list, gt_list, logger): + + vg_evaluator = VisualGroundingEvaluator(True) + + assert len(pred_list) == len(gt_list) + + batch_input = [] + for _index in range(len(pred_list)): + batch_input.append({'pred_list':pred_list[_index],\ + 'gt_list':gt_list[_index],'index':_index}) + batch_input[-1] = to_mmscan_form(batch_input[-1]) + + vg_evaluator.update(batch_input) + + print('Staring evaluation!') + print(vg_evaluator.start_evaluation()) + + result_table = vg_evaluator.print_result() + vg_evaluator.reset() + + if logger is not None: + logger.write('\n' + result_table + '\n') + logger.flush() + + return result_table + + +if __name__ == '__main__': + store_file_path = '/mnt/petrelfs/linjingli/tmp/data/big_tmp/result_1020.pt' + store_results = torch.load(store_file_path) + pred_list = store_results['pred_list'] + gt_list = store_results['gt_list'] + get_eval(pred_list, gt_list, None) diff --git a/models/Scanrefer/lib/grounding_metric.py b/models/Scanrefer/lib/grounding_metric.py new file mode 100644 index 0000000..d8a1a0d --- /dev/null +++ b/models/Scanrefer/lib/grounding_metric.py @@ -0,0 +1,312 @@ +# Copyright (c) OpenRobotLab. All rights reserved. +import logging +from typing import Any, Dict, List, Optional, Sequence, Union + +import numpy as np +import torch +from lib.euler_utils import euler_iou3d_split +from terminaltables import AsciiTable +from tqdm import tqdm + + +def average_precision(recalls, precisions, mode='area'): + """Calculate average precision (for single or multiple scales). + + Args: + recalls (np.ndarray): Recalls with shape of (num_scales, num_dets) + or (num_dets, ). + precisions (np.ndarray): Precisions with shape of + (num_scales, num_dets) or (num_dets, ). + mode (str): 'area' or '11points', 'area' means calculating the area + under precision-recall curve, '11points' means calculating + the average precision of recalls at [0, 0.1, ..., 1] + + Returns: + float or np.ndarray: Calculated average precision. 
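+
+    Example (a hand-checked sketch, added, not from the original source):
+        >>> recalls = np.array([0.5, 1.0])
+        >>> precisions = np.array([1.0, 0.5])
+        >>> # 'area' mode integrates the interpolated PR curve:
+        >>> # 0.5 * 1.0 + 0.5 * 0.5
+        >>> float(average_precision(recalls, precisions)[0])
+        0.75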
+ """ + if recalls.ndim == 1: + recalls = recalls[np.newaxis, :] + precisions = precisions[np.newaxis, :] + + assert recalls.shape == precisions.shape + assert recalls.ndim == 2 + + num_scales = recalls.shape[0] + ap = np.zeros(num_scales, dtype=np.float32) + if mode == 'area': + zeros = np.zeros((num_scales, 1), dtype=recalls.dtype) + ones = np.ones((num_scales, 1), dtype=recalls.dtype) + mrec = np.hstack((zeros, recalls, ones)) + mpre = np.hstack((zeros, precisions, zeros)) + for i in range(mpre.shape[1] - 1, 0, -1): + mpre[:, i - 1] = np.maximum(mpre[:, i - 1], mpre[:, i]) + for i in range(num_scales): + ind = np.where(mrec[i, 1:] != mrec[i, :-1])[0] + ap[i] = np.sum( + (mrec[i, ind + 1] - mrec[i, ind]) * mpre[i, ind + 1]) + elif mode == '11points': + for i in range(num_scales): + for thr in np.arange(0, 1 + 1e-3, 0.1): + precs = precisions[i, recalls[i, :] >= thr] + prec = precs.max() if precs.size > 0 else 0 + ap[i] += prec + ap /= 11 + else: + raise ValueError( + 'Unrecognized mode, only "area" and "11points" are supported') + return ap + + +def abbr(sub_class): + sub_class = sub_class.lower() + sub_class = sub_class.replace('single', 'sngl') + sub_class = sub_class.replace('inter', 'int') + sub_class = sub_class.replace('unique', 'uniq') + sub_class = sub_class.replace('common', 'cmn') + sub_class = sub_class.replace('attribute', 'attr') + if 'sngl' in sub_class and ('attr' in sub_class or 'eq' in sub_class): + sub_class = 'vg_sngl_attr' + return sub_class + + +def ground_eval_subset(gt_anno_list, det_anno_list, logger=None, prefix=''): + """ + det_anno_list: list of dictionaries with keys: + 'bboxes_3d': (N, 9) or a (list, tuple) (center, size, rotmat): (N, 3), (N, 3), (N, 3, 3) + 'target_scores_3d': (N, ) + gt_anno_list: list of dictionaries with keys: + 'gt_bboxes_3d': (M, 9) or a (list, tuple) (center, size, rotmat): (M, 3), (M, 3), (M, 3, 3) + 'sub_class': str + """ + assert len(det_anno_list) == len(gt_anno_list) + iou_thr = [0.25, 0.5] + num_samples = len(gt_anno_list) # each sample contains multiple pred boxes + # these lists records for each sample, whether a gt box is matched or not + gt_matched_records = [[] for _ in iou_thr] + # these lists records for each pred box, NOT for each sample + sample_indices = [] # each pred box belongs to which sample + confidences = [] # each pred box has a confidence score + ious = [ + ] # each pred box has a ious, shape (num_gt) in the corresponding sample + # record the indices of each reference type + num_gts_per_sample = [] + + for sample_idx in tqdm(range(num_samples)): + det_anno = det_anno_list[sample_idx] + gt_anno = gt_anno_list[sample_idx] + + target_scores = det_anno['score'] # (num_query, ) + top_idxs = torch.argsort(target_scores, descending=True) + target_scores = target_scores[top_idxs] + pred_center = det_anno['center'][top_idxs] + pred_size = det_anno['size'][top_idxs] + pred_rot = det_anno['rot'][top_idxs] + + gt_center = gt_anno['center'] + gt_size = gt_anno['size'] + gt_rot = gt_anno['rot'] + + num_preds = pred_center.shape[0] + num_gts = gt_center.shape[0] + num_gts_per_sample.append(num_gts) + iou_mat = euler_iou3d_split(pred_center, pred_size, pred_rot, + gt_center, gt_size, gt_rot) + for i, score in enumerate(target_scores): + sample_indices.append(sample_idx) + confidences.append(score) + ious.append(iou_mat[i]) + + subset_result = { + 'confidences': confidences, # list, num_preds + 'sample_indices': sample_indices, # list, num_preds + 'ious': + ious, # list, num_preds, each element is a num_gt (changing) 
np.ndarray + 'num_gts_per_sample': num_gts_per_sample, # list, batch_size + 'prefix': prefix + } + return subset_result + + +def ground_eval_overall(gt_anno_list, det_anno_list, logger=None): + """ + det_anno_list: list of dictionaries with keys: + 'bboxes_3d': (N, 9) or a (list, tuple) (center, size, rotmat): (N, 3), (N, 3), (N, 3, 3) + 'target_scores_3d': (N, ) + gt_anno_list: list of dictionaries with keys: + 'gt_bboxes_3d': (M, 9) or a (list, tuple) (center, size, rotmat): (M, 3), (M, 3), (M, 3, 3) + 'sub_class': str + """ + iou_thr = [0.25, 0.5] + reference_options = list( + set(abbr(gt_anno['sub_class']) for gt_anno in gt_anno_list)) + reference_options.sort() + assert len(det_anno_list) == len(gt_anno_list) + full_result = {} + for ref in reference_options: + indices = [ + i for i, gt_anno in enumerate(gt_anno_list) + if abbr(gt_anno['sub_class']) == ref + ] + sub_gt_annos = [gt_anno_list[i] for i in indices] + sub_det_annos = [det_anno_list[i] for i in indices] + subset_results = ground_eval_subset(sub_gt_annos, + sub_det_annos, + logger=logger, + prefix=ref) + full_result[ref] = subset_results + # overall results + # contatenate all the subsets' results + full_result['overall'] = { + 'confidences': [], + 'sample_indices': [], + 'ious': [], + 'num_gts_per_sample': [], + 'prefix': 'overall' + } + prev_samples = 0 + for ref in reference_options: + full_result['overall']['confidences'].extend( + full_result[ref]['confidences']) + full_result['overall']['sample_indices'].extend( + [x + prev_samples for x in full_result[ref]['sample_indices']]) + full_result['overall']['ious'].extend(full_result[ref]['ious']) + full_result['overall']['num_gts_per_sample'].extend( + full_result[ref]['num_gts_per_sample']) + prev_samples += len(full_result[ref]['num_gts_per_sample']) + return full_result + + +def compute_metric_subset(subset_results, + iou_thr=[0.25, 0.5], + logger=None, + prefix=''): + assert isinstance(subset_results, + dict), f'expected a dict, but got {type(subset_results)}' + confidences = subset_results['confidences'] + sample_indices = subset_results['sample_indices'] + ious = subset_results['ious'] + total_gt_boxes = sum(subset_results['num_gts_per_sample']) + gt_matched_records = [[] for thr in iou_thr] + for iou_idx in range(len(iou_thr)): + for n_gt in subset_results['num_gts_per_sample']: + gt_matched_records[iou_idx].append(np.zeros(n_gt, dtype=bool)) + + confidences = np.array(confidences) + sorted_inds = np.argsort(-confidences) + sample_indices = [sample_indices[i] for i in sorted_inds] + ious = [ious[i] for i in sorted_inds] + + tp_thr = {} + fp_thr = {} + for thr in iou_thr: + tp_thr[f'{prefix}@{thr}'] = np.zeros(len(sample_indices)) + fp_thr[f'{prefix}@{thr}'] = np.zeros(len(sample_indices)) + + for d, sample_idx in enumerate(sample_indices): + iou_max = -np.inf + cur_iou = ious[d] + num_gts = cur_iou.shape[0] + if num_gts > 0: + for j in range(num_gts): + iou = cur_iou[j] + if iou > iou_max: + iou_max = iou + jmax = j + + for iou_idx, thr in enumerate(iou_thr): + if iou_max >= thr: + if not gt_matched_records[iou_idx][sample_idx][jmax]: + gt_matched_records[iou_idx][sample_idx][jmax] = True + tp_thr[f'{prefix}@{thr}'][d] = 1.0 + else: + fp_thr[f'{prefix}@{thr}'][d] = 1.0 + else: + fp_thr[f'{prefix}@{thr}'][d] = 1.0 + + ret = {} + for t in iou_thr: + metric = prefix + '@' + str(t) + fp = np.cumsum(fp_thr[metric]) + tp = np.cumsum(tp_thr[metric]) + recall = tp / float(total_gt_boxes) + precision = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = 
average_precision(recall, precision) + ret[metric] = float(ap) + best_recall = recall[-1] if len(recall) > 0 else 0 + f1s = 2 * recall * precision / np.maximum(recall + precision, + np.finfo(np.float64).eps) + best_f1 = max(f1s) + ret[metric + '_rec'] = float(best_recall) + ret[metric + '_f1'] = float(best_f1) + ret[prefix + '_num_gt'] = total_gt_boxes + return ret + + +def ground_eval(gt_anno_list, det_anno_list, logger=None): + """ + det_anno_list: list of dictionaries with keys: + 'bboxes_3d': (N, 9) or a (list, tuple) (center, size, rotmat): (N, 3), (N, 3), (N, 3, 3) + 'target_scores_3d': (N, ) + gt_anno_list: list of dictionaries with keys: + 'gt_bboxes_3d': (M, 9) or a (list, tuple) (center, size, rotmat): (M, 3), (M, 3), (M, 3, 3) + 'sub_class': str + """ + + iou_thr = [0.25, 0.5] + reference_options = [ + abbr(gt_anno.get('sub_class', 'other')) for gt_anno in gt_anno_list + ] + reference_options = list(set(reference_options)) + reference_options.sort() + reference_options.append('overall') + + assert len(det_anno_list) == len(gt_anno_list) + metric_results = {} + mid_result = ground_eval_overall(gt_anno_list, + det_anno_list, + logger=logger) + for ref in reference_options: + metric_results.update( + compute_metric_subset(mid_result[ref], + iou_thr=iou_thr, + logger=logger, + prefix=ref)) + metric_results.update( + compute_metric_subset(mid_result['overall'], + iou_thr=iou_thr, + logger=logger, + prefix='overall')) + + header = ['Type'] + header.extend(reference_options) + table_columns = [[] for _ in range(len(header))] + for t in iou_thr: + table_columns[0].append('AP ' + str(t)) + table_columns[0].append('Rec ' + str(t)) + table_columns[0].append('F1 ' + str(t)) + for i, ref in enumerate(reference_options): + metric = ref + '@' + str(t) + ap = metric_results[metric] + best_recall = metric_results[metric + '_rec'] + best_f1 = metric_results[metric + '_f1'] + table_columns[i + 1].append(f'{float(ap):.4f}') + table_columns[i + 1].append(f'{float(best_recall):.4f}') + table_columns[i + 1].append(f'{float(best_f1):.4f}') + table_columns[0].append('Num GT') + for i, ref in enumerate(reference_options): + # add num_gt + table_columns[i + 1].append(f'{int(metric_results[ref + "_num_gt"])}') + + table_data = [header] + table_rows = list(zip(*table_columns)) + table_data += table_rows + table_data = [list(row) for row in zip(*table_data)] # transpose the table + table = AsciiTable(table_data) + table.inner_footing_row_border = True + # print('\n' + table.table) + if logger is not None: + logger.write('\n' + table.table + '\n') + logger.flush() + print('\n' + table.table) + + return metric_results diff --git a/models/Scanrefer/lib/loss.py b/models/Scanrefer/lib/loss.py new file mode 100644 index 0000000..1e962e3 --- /dev/null +++ b/models/Scanrefer/lib/loss.py @@ -0,0 +1,39 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F + + +class SoftmaxRankingLoss(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, inputs, targets): + # input check + assert inputs.shape == targets.shape + + # compute the probabilities + probs = F.softmax(inputs + 1e-8, dim=1) + + # reduction + loss = -torch.sum(torch.log(probs + 1e-8) * targets, dim=1).mean() + + return loss + + +class SigmoidRankingLoss(nn.Module): + + def __init__(self): + super().__init__() + + def forward(self, inputs, targets): + # input check + assert inputs.shape == targets.shape + + # compute the probabilities + probs = torch.sigmoid(inputs) + + # reduction + loss = -torch.sum(torch.log(probs + 
1e-8) * targets, dim=1).mean() + + return loss diff --git a/models/Scanrefer/lib/loss_helper.py b/models/Scanrefer/lib/loss_helper.py new file mode 100644 index 0000000..9deb8e7 --- /dev/null +++ b/models/Scanrefer/lib/loss_helper.py @@ -0,0 +1,426 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. + +import os +import sys + +import numpy as np +import torch +import torch.nn as nn + +sys.path.append(os.path.join(os.getcwd(), 'lib')) # HACK add the lib folder +from lib.ap_helper import parse_predictions +from lib.euler_utils import (axis_aligned_bbox_overlaps_3d, bbox_to_corners, + chamfer_distance, euler_iou3d) +from lib.loss import SigmoidRankingLoss, SoftmaxRankingLoss +from utils.box_util import (box3d_iou, box3d_iou_batch, get_3d_box, + get_3d_box_batch) +from utils.nn_distance import huber_loss, nn_distance + +FAR_THRESHOLD = 0.6 +NEAR_THRESHOLD = 0.3 +GT_VOTE_FACTOR = 3 # number of GT votes per point +OBJECTNESS_CLS_WEIGHTS = [0.2, + 0.8] # put larger weights on positive objectness + + +def compute_vote_loss(data_dict): + """Compute vote loss: Match predicted votes to GT votes. + + Args: + data_dict: dict (read-only) + + Returns: + vote_loss: scalar Tensor + + Overall idea: + If the seed point belongs to an object (votes_label_mask == 1), + then we require it to vote for the object center. + + Each seed point may vote for multiple translations v1,v2,v3 + A seed point may also be in the boxes of multiple objects: + o1,o2,o3 with corresponding GT votes c1,c2,c3 + + Then the loss for this seed point is: + min(d(v_i,c_j)) for i=1,2,3 and j=1,2,3 + """ + + # Load ground truth votes and assign them to seed points + batch_size = data_dict['seed_xyz'].shape[0] + num_seed = data_dict['seed_xyz'].shape[1] # B,num_seed,3 + vote_xyz = data_dict['vote_xyz'] # B,num_seed*vote_factor,3 + seed_inds = data_dict['seed_inds'].long() # B,num_seed in [0,num_points-1] + + # Get groundtruth votes for the seed points + # vote_label_mask: Use gather to select B,num_seed from B,num_point + # non-object point has no GT vote mask = 0, object point has mask = 1 + # vote_label: Use gather to select B,num_seed,9 from B,num_point,9 + # with inds in shape B,num_seed,9 and 9 = GT_VOTE_FACTOR * 3 + seed_gt_votes_mask = torch.gather(data_dict['vote_label_mask'], 1, + seed_inds) + seed_inds_expand = seed_inds.view(batch_size, num_seed, + 1).repeat(1, 1, 3 * GT_VOTE_FACTOR) + seed_gt_votes = torch.gather(data_dict['vote_label'], 1, seed_inds_expand) + seed_gt_votes += data_dict['seed_xyz'].repeat(1, 1, 3) + + # Compute the min of min of distance + vote_xyz_reshape = vote_xyz.view( + batch_size * num_seed, -1, + 3) # from B,num_seed*vote_factor,3 to B*num_seed,vote_factor,3 + seed_gt_votes_reshape = seed_gt_votes.view( + batch_size * num_seed, GT_VOTE_FACTOR, + 3) # from B,num_seed,3*GT_VOTE_FACTOR to B*num_seed,GT_VOTE_FACTOR,3 + # A predicted vote to no where is not penalized as long as there is a good vote near the GT vote. + dist1, _, dist2, _ = nn_distance(vote_xyz_reshape, + seed_gt_votes_reshape, + l1=True) + votes_dist, _ = torch.min( + dist2, dim=1) # (B*num_seed,vote_factor) to (B*num_seed,) + votes_dist = votes_dist.view(batch_size, num_seed) + vote_loss = torch.sum(votes_dist * seed_gt_votes_mask.float()) / ( + torch.sum(seed_gt_votes_mask.float()) + 1e-6) + return vote_loss + + +def compute_objectness_loss(data_dict): + """Compute objectness loss for the proposals. 
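+
+    Proposals whose centers lie within NEAR_THRESHOLD of a GT center are
+    labeled positive; those farther than FAR_THRESHOLD are confident
+    negatives; proposals in the gray zone between the two thresholds are
+    masked out of the loss.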
+ + Args: + data_dict: dict (read-only) + + Returns: + objectness_loss: scalar Tensor + objectness_label: (batch_size, num_seed) Tensor with value 0 or 1 + objectness_mask: (batch_size, num_seed) Tensor with value 0 or 1 + object_assignment: (batch_size, num_seed) Tensor with long int + within [0,num_gt_object-1] + """ + # Associate proposal and GT objects by point-to-point distances + aggregated_vote_xyz = data_dict['aggregated_vote_xyz'] + gt_center = data_dict['center_label'][:, :, 0:3] + B = gt_center.shape[0] + K = aggregated_vote_xyz.shape[1] + K2 = gt_center.shape[1] + dist1, ind1, dist2, _ = nn_distance(aggregated_vote_xyz, + gt_center) # dist1: BxK, dist2: BxK2 + + # Generate objectness label and mask + # objectness_label: 1 if pred object center is within NEAR_THRESHOLD of any GT object + # objectness_mask: 0 if pred object center is in gray zone (DONOTCARE), 1 otherwise + euclidean_dist1 = torch.sqrt(dist1 + 1e-6) + objectness_label = torch.zeros((B, K), dtype=torch.long).cuda() + objectness_mask = torch.zeros((B, K)).cuda() + objectness_label[euclidean_dist1 < NEAR_THRESHOLD] = 1 + objectness_mask[euclidean_dist1 < NEAR_THRESHOLD] = 1 + objectness_mask[euclidean_dist1 > FAR_THRESHOLD] = 1 + + # Compute objectness loss + objectness_scores = data_dict['objectness_scores'] + criterion = nn.CrossEntropyLoss( + torch.Tensor(OBJECTNESS_CLS_WEIGHTS).cuda(), reduction='none') + objectness_loss = criterion(objectness_scores.transpose(2, 1), + objectness_label) + objectness_loss = torch.sum(objectness_loss * objectness_mask) / ( + torch.sum(objectness_mask) + 1e-6) + + # Set assignment + object_assignment = ind1 # (B,K) with values in 0,1,...,K2-1 # TODO yesname + + return objectness_loss, objectness_label, objectness_mask, object_assignment + + +def compute_box_and_sem_cls_loss(data_dict, config): + """Compute 3D bounding box and semantic classification loss. 
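+
+    Box orientation is supervised implicitly through an L1 chamfer distance
+    between predicted and assigned GT box corners (bbox_cd_loss); the
+    original heading-bin classification/regression losses are retained
+    below only as commented-out reference code.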
+ + Args: + data_dict: dict (read-only) + + Returns: + center_loss + heading_cls_loss + heading_reg_loss + size_cls_loss + size_reg_loss + sem_cls_loss + """ + + num_heading_bin = config.num_heading_bin + num_size_cluster = config.num_size_cluster + num_class = config.num_class + mean_size_arr = config.mean_size_arr + + object_assignment = data_dict['object_assignment'] + batch_size = object_assignment.shape[0] + + # Compute center loss + pred_center = data_dict['center'] + gt_center = data_dict['center_label'][:, :, 0:3] + dist1, ind1, dist2, _ = nn_distance(pred_center, + gt_center) # dist1: BxK, dist2: BxK2 + box_label_mask = data_dict['box_label_mask'] + objectness_label = data_dict['objectness_label'].float() + centroid_reg_loss1 = \ + torch.sum(dist1*objectness_label)/(torch.sum(objectness_label)+1e-6) + centroid_reg_loss2 = \ + torch.sum(dist2*box_label_mask)/(torch.sum(box_label_mask)+1e-6) + center_loss = centroid_reg_loss1 + centroid_reg_loss2 + + # Compute size losslib/loss_helper.py + size_class_label = torch.gather( + data_dict['size_class_label'], 1, + object_assignment) # select (B,K) from (B,K2) + criterion_size_class = nn.CrossEntropyLoss(reduction='none') + size_class_loss = criterion_size_class(data_dict['size_scores'].transpose( + 2, 1), size_class_label) # (B,K) + size_class_loss = torch.sum(size_class_loss * objectness_label) / ( + torch.sum(objectness_label) + 1e-6) + + size_residual_label = torch.gather(data_dict['size_residual_label'], 1, + object_assignment.unsqueeze(-1).repeat( + 1, 1, + 3)) # select (B,K,3) from (B,K2,3) + size_label_one_hot = torch.cuda.FloatTensor(batch_size, + size_class_label.shape[1], + num_size_cluster).zero_() + size_label_one_hot.scatter_( + 2, size_class_label.unsqueeze(-1), + 1) # src==1 so it's *one-hot* (B,K,num_size_cluster) + size_label_one_hot_tiled = size_label_one_hot.unsqueeze(-1).repeat( + 1, 1, 1, 3) # (B,K,num_size_cluster,3) + predicted_size_residual_normalized = torch.sum( + data_dict['size_residuals_normalized'] * size_label_one_hot_tiled, + 2) # (B,K,3) + + mean_size_arr_expanded = torch.from_numpy(mean_size_arr.astype( + np.float32)).cuda().unsqueeze(0).unsqueeze( + 0) # (1,1,num_size_cluster,3) + mean_size_label = torch.sum(size_label_one_hot_tiled * + mean_size_arr_expanded, 2) # (B,K,3) + size_residual_label_normalized = size_residual_label / mean_size_label # (B,K,3) + size_residual_normalized_loss = torch.mean( + huber_loss(predicted_size_residual_normalized - + size_residual_label_normalized, + delta=1.0), -1) # (B,K,3) -> (B,K) + size_residual_normalized_loss = torch.sum( + size_residual_normalized_loss * + objectness_label) / (torch.sum(objectness_label) + 1e-6) + + pred_size_class = torch.argmax(data_dict['size_scores'], -1) + pred_size = torch.gather( + data_dict['size_calc'], 2, + pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 1, 3)) + pred_size = pred_size.squeeze(2) + # Compute heading loss # TODO yesname + target_rot_mat = torch.gather( + data_dict['target_rot_mat'], 1, + object_assignment.unsqueeze(-1).unsqueeze(-1).repeat(1, 1, 3, + 3)) # B, K, 3x3 + target_bbox = torch.gather(data_dict['target_bbox'], 1, + object_assignment.unsqueeze(-1).repeat( + 1, 1, 6)) # B, K, 6 + pred_rot_mat = data_dict['rot_mat'] + pred_corners = bbox_to_corners(pred_center, pred_size, pred_rot_mat) + target_corners = bbox_to_corners(target_bbox[:, :, :3], + target_bbox[:, :, 3:6], target_rot_mat) + bbox_cd_loss, _, _, _ = chamfer_distance(pred_corners, target_corners, 1.0, + 0.0, 'l1', 'mean') + + # 
heading_class_label = torch.gather(data_dict['heading_class_label'], 1, object_assignment) # select (B,K) from (B,K2) + # criterion_heading_class = nn.CrossEntropyLoss(reduction='none') + # heading_class_loss = criterion_heading_class(data_dict['heading_scores'].transpose(2,1), heading_class_label) # (B,K) + # heading_class_loss = torch.sum(heading_class_loss * objectness_label)/(torch.sum(objectness_label)+1e-6) + + # heading_residual_label = torch.gather(data_dict['heading_residual_label'], 1, object_assignment) # select (B,K) from (B,K2) + # heading_residual_normalized_label = heading_residual_label / (np.pi/num_heading_bin) + + # # Ref: https://discuss.pytorch.org/t/convert-int-into-one-hot-format/507/3 + # heading_label_one_hot = torch.cuda.FloatTensor(batch_size, heading_class_label.shape[1], num_heading_bin).zero_() + # heading_label_one_hot.scatter_(2, heading_class_label.unsqueeze(-1), 1) # src==1 so it's *one-hot* (B,K,num_heading_bin) + # heading_residual_normalized_loss = huber_loss(torch.sum(data_dict['heading_residuals_normalized']*heading_label_one_hot, -1) - heading_residual_normalized_label, delta=1.0) # (B,K) + # heading_residual_normalized_loss = torch.sum(heading_residual_normalized_loss*objectness_label)/(torch.sum(objectness_label)+1e-6) + + # 3.4 Semantic cls loss + sem_cls_label = torch.gather(data_dict['sem_cls_label'], 1, + object_assignment) # select (B,K) from (B,K2) + criterion_sem_cls = nn.CrossEntropyLoss(reduction='none') + sem_cls_loss = criterion_sem_cls(data_dict['sem_cls_scores'].transpose( + 2, 1), sem_cls_label) # (B,K) + sem_cls_loss = torch.sum( + sem_cls_loss * objectness_label) / (torch.sum(objectness_label) + 1e-6) + + return center_loss, bbox_cd_loss, size_class_loss, size_residual_normalized_loss, sem_cls_loss + + +def compute_reference_loss(data_dict, config): + """Compute cluster reference loss. 
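+
+    For each referenced GT box, the proposal with the highest Euler-aware
+    3D IoU is taken as the positive target, and a sigmoid ranking loss is
+    applied between the predicted cluster scores and these binary labels.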
+
+
+def compute_reference_loss(data_dict, config):
+    """Compute cluster reference loss.
+
+    Args:
+        data_dict: dict (read-only)
+
+    Returns:
+        loss, cluster_preds, cluster_labels
+    """
+
+    # unpack
+    cluster_preds = data_dict['cluster_ref']  # (B, num_proposal)
+
+    # predicted bbox
+    pred_ref = data_dict['cluster_ref'].detach()  # (B,)
+    pred_center = data_dict['center'].detach()  # (B,K,3)
+    bsz = pred_center.shape[0]
+
+    pred_rot_mat = data_dict['rot_mat'].detach()
+    pred_size_class = torch.argmax(data_dict['size_scores'],
+                                   -1)  # B,num_proposal
+    pred_size_residual = torch.gather(
+        data_dict['size_residuals'], 2,
+        pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(
+            1, 1, 1, 3))  # B,num_proposal,1,3
+    pred_size = torch.gather(
+        data_dict['size_calc'], 2,
+        pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat(
+            1, 1, 1, 3))  # B,num_proposal,1,3
+    pred_size = pred_size.squeeze(2)
+    pred_size_class = pred_size_class.detach()
+    pred_size_residual = pred_size_residual.squeeze(
+        2).detach()  # B,num_proposal,3
+
+    # ground truth bbox
+    gt_center = data_dict['center_label'].detach()  # (B, max_obj_per_scene, 3)
+    gt_rot_mat = data_dict['target_rot_mat'].detach()
+    gt_boxes = data_dict['target_bbox'].detach()
+    gt_size = gt_boxes[:, :, 3:6].detach()
+    gt_size = gt_size.clamp(min=2e-2)
+    gt_ref = data_dict['ref_box_label'].detach()
+
+    # set precision to float64 when computing gt corners
+    gt_corners = bbox_to_corners(gt_center.to(torch.float64),
+                                 gt_size.to(torch.float64),
+                                 gt_rot_mat.to(torch.float64)).to(
+                                     torch.float32)
+    # compute the iou score for all predicted positive ref
+    batch_size, num_proposals = cluster_preds.shape
+    labels = np.zeros((batch_size, num_proposals))
+    for i in range(pred_ref.shape[0]):
+        # convert the bbox parameters to bbox corners
+        pred_boxes = torch.concat([pred_center[i], pred_size[i]], dim=1)
+        gt_boxes_single = gt_boxes[i, gt_ref[i]]
+        num_gts = gt_boxes_single.shape[0]
+        pred_corners = bbox_to_corners(pred_center[i],
+                                       pred_size[i].clamp(min=2e-2),
+                                       pred_rot_mat[i])
+        ious = euler_iou3d(pred_corners, gt_corners[i, gt_ref[i]])
+        # ious = axis_aligned_bbox_overlaps_3d(pred_boxes, gt_boxes_single[:, :6])
+        for j in range(num_gts):
+            labels[i, ious[:, j].argmax(
+            )] = 1  # treat the bbox with the highest iou score as the gt
+
+    cluster_labels = torch.FloatTensor(labels).cuda()
+
+    # reference loss
+    criterion = SigmoidRankingLoss()
+    loss = criterion(cluster_preds, cluster_labels.float().clone())
+
+    return loss, cluster_preds, cluster_labels
+
+
+def compute_lang_classification_loss(data_dict):
+    criterion = torch.nn.BCEWithLogitsLoss()
+    loss = criterion(data_dict['lang_scores'], data_dict['ref_class_label'])
+
+    return loss
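The target-assignment loop above marks, for each ground-truth box, the single proposal with the highest IoU as positive. Isolated as a function over a precomputed IoU matrix (a sketch; `assign_best_iou` is an illustrative name, not part of the repo):

```python
import numpy as np

def assign_best_iou(ious: np.ndarray) -> np.ndarray:
    """ious: (num_proposals, num_gts) -> 0/1 labels over proposals,
    marking the highest-IoU proposal per GT box."""
    labels = np.zeros(ious.shape[0])
    for j in range(ious.shape[1]):
        labels[ious[:, j].argmax()] = 1
    return labels

ious = np.array([[0.1, 0.7], [0.6, 0.2], [0.3, 0.3]])
print(assign_best_iou(ious))  # [1. 1. 0.]: proposals 0 and 1 become positives
```

Note that several proposals can end up positive (one per GT), which is why the loss is a per-proposal sigmoid ranking loss rather than a softmax over proposals.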
+
+
+def get_loss(data_dict,
+             config,
+             detection=True,
+             reference=True,
+             use_lang_classifier=False):
+    """Loss functions.
+
+    Args:
+        data_dict: dict
+        config: dataset config instance
+        detection: flag (False/True), supervise the detection losses
+        reference: flag (False/True), supervise the reference loss
+        use_lang_classifier: flag (False/True), supervise the language
+            classification loss
+
+    Returns:
+        loss: pytorch scalar tensor
+        data_dict: dict
+    """
+
+    # Vote loss
+    vote_loss = compute_vote_loss(data_dict)
+
+    # Obj loss
+    objectness_loss, objectness_label, objectness_mask, object_assignment = compute_objectness_loss(
+        data_dict)
+    num_proposal = objectness_label.shape[1]
+    total_num_proposal = objectness_label.shape[0] * objectness_label.shape[1]
+    data_dict['objectness_label'] = objectness_label
+    data_dict['objectness_mask'] = objectness_mask
+    data_dict['object_assignment'] = object_assignment
+    data_dict['pos_ratio'] = torch.sum(
+        objectness_label.float().cuda()) / float(total_num_proposal)
+    data_dict['neg_ratio'] = torch.sum(objectness_mask.float()) / float(
+        total_num_proposal) - data_dict['pos_ratio']
+
+    # Box loss and sem cls loss
+    center_loss, bbox_cd_loss, size_cls_loss, size_reg_loss, sem_cls_loss = compute_box_and_sem_cls_loss(
+        data_dict, config)
+    box_loss = center_loss + bbox_cd_loss + 0.1 * size_cls_loss + size_reg_loss
+
+    if detection:
+        data_dict['vote_loss'] = vote_loss
+        data_dict['objectness_loss'] = objectness_loss
+        data_dict['center_loss'] = center_loss
+        data_dict['bbox_cd_loss'] = bbox_cd_loss
+        data_dict['size_cls_loss'] = size_cls_loss
+        data_dict['size_reg_loss'] = size_reg_loss
+        data_dict['sem_cls_loss'] = sem_cls_loss
+        data_dict['box_loss'] = box_loss
+    else:
+        data_dict['vote_loss'] = torch.zeros(1)[0].cuda()
+        data_dict['objectness_loss'] = torch.zeros(1)[0].cuda()
+        data_dict['center_loss'] = torch.zeros(1)[0].cuda()
+        data_dict['heading_cls_loss'] = torch.zeros(1)[0].cuda()
+        data_dict['heading_reg_loss'] = torch.zeros(1)[0].cuda()
+        data_dict['size_cls_loss'] = torch.zeros(1)[0].cuda()
+        data_dict['size_reg_loss'] = torch.zeros(1)[0].cuda()
+        data_dict['sem_cls_loss'] = torch.zeros(1)[0].cuda()
+        data_dict['box_loss'] = torch.zeros(1)[0].cuda()
+
+    if reference:
+        # Reference loss
+        ref_loss, _, cluster_labels = compute_reference_loss(data_dict, config)
+        data_dict['cluster_labels'] = cluster_labels
+        data_dict['ref_loss'] = ref_loss
+    else:
+        data_dict['cluster_labels'] = objectness_label.new_zeros(
+            objectness_label.shape).cuda()
+        data_dict['cluster_ref'] = objectness_label.new_zeros(
+            objectness_label.shape).float().cuda()
+        data_dict['ref_loss'] = torch.zeros(1)[0].cuda()
+
+    if reference and use_lang_classifier:
+        data_dict['lang_loss'] = compute_lang_classification_loss(data_dict)
+    else:
+        data_dict['lang_loss'] = torch.zeros(1)[0].cuda()
+
+    # Final loss function
+    loss = data_dict['vote_loss'] + 0.5*data_dict['objectness_loss'] + data_dict['box_loss'] + 0.1*data_dict['sem_cls_loss'] \
+        + 0.1*data_dict['ref_loss'] + 0.1*data_dict['lang_loss']
+
+    loss *= 10  # amplify
+
+    data_dict['loss'] = loss
+
+    return loss, data_dict
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/include/ball_query.h b/models/Scanrefer/lib/pointnet2/_ext_src/include/ball_query.h
new file mode 100644
index 0000000..1bbc638
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/include/ball_query.h
@@ -0,0 +1,5 @@
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
+                      const int nsample);
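For intuition, the `ball_query` op declared here returns, for each query center, up to `nsample` indices of points within `radius`, padding short neighborhoods by repeating the first hit. A slow pure-PyTorch reference of those semantics (illustrative only, not how the CUDA kernel is invoked):

```python
import torch

def ball_query_ref(new_xyz, xyz, radius, nsample):
    """new_xyz: (B, M, 3) centers, xyz: (B, N, 3) points -> idx: (B, M, nsample)."""
    d2 = torch.cdist(new_xyz, xyz) ** 2                # (B, M, N) squared distances
    mask = d2 < radius ** 2
    idx = torch.zeros(*mask.shape[:2], nsample, dtype=torch.long)
    for b in range(mask.shape[0]):
        for m in range(mask.shape[1]):
            hits = mask[b, m].nonzero(as_tuple=False).flatten()[:nsample]
            if hits.numel() > 0:
                idx[b, m] = hits[0]                    # pad with first hit, like the kernel
                idx[b, m, :hits.numel()] = hits
    return idx
```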
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/include/cuda_utils.h b/models/Scanrefer/lib/pointnet2/_ext_src/include/cuda_utils.h
new file mode 100644
index 0000000..0fd5b6e
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/include/cuda_utils.h
@@ -0,0 +1,41 @@
+#ifndef _CUDA_UTILS_H
+#define _CUDA_UTILS_H
+
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <cmath>
+
+#include <cuda.h>
+#include <cuda_runtime.h>
+
+#include <vector>
+
+#define TOTAL_THREADS 512
+
+inline int opt_n_threads(int work_size) {
+  const int pow_2 = std::log(static_cast<double>(work_size)) / std::log(2.0);
+
+  return max(min(1 << pow_2, TOTAL_THREADS), 1);
+}
+
+inline dim3 opt_block_config(int x, int y) {
+  const int x_threads = opt_n_threads(x);
+  const int y_threads =
+      max(min(opt_n_threads(y), TOTAL_THREADS / x_threads), 1);
+  dim3 block_config(x_threads, y_threads, 1);
+
+  return block_config;
+}
+
+#define CUDA_CHECK_ERRORS()                                           \
+  do {                                                                \
+    cudaError_t err = cudaGetLastError();                             \
+    if (cudaSuccess != err) {                                         \
+      fprintf(stderr, "CUDA kernel failed : %s\n%s at L:%d in %s\n",  \
+              cudaGetErrorString(err), __PRETTY_FUNCTION__, __LINE__, \
+              __FILE__);                                              \
+      exit(-1);                                                       \
+    }                                                                 \
+  } while (0)
+
+#endif
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/include/group_points.h b/models/Scanrefer/lib/pointnet2/_ext_src/include/group_points.h
new file mode 100644
index 0000000..ad20cda
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/include/group_points.h
@@ -0,0 +1,5 @@
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor group_points(at::Tensor points, at::Tensor idx);
+at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/include/interpolate.h b/models/Scanrefer/lib/pointnet2/_ext_src/include/interpolate.h
new file mode 100644
index 0000000..26b3464
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/include/interpolate.h
@@ -0,0 +1,10 @@
+#pragma once
+
+#include <torch/extension.h>
+#include <vector>
+
+std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows);
+at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
+                             at::Tensor weight);
+at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
+                                  at::Tensor weight, const int m);
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/include/sampling.h b/models/Scanrefer/lib/pointnet2/_ext_src/include/sampling.h
new file mode 100644
index 0000000..d795271
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/include/sampling.h
@@ -0,0 +1,6 @@
+#pragma once
+#include <torch/extension.h>
+
+at::Tensor gather_points(at::Tensor points, at::Tensor idx);
+at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx, const int n);
+at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples);
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/include/utils.h b/models/Scanrefer/lib/pointnet2/_ext_src/include/utils.h
new file mode 100644
index 0000000..5f080ed
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/include/utils.h
@@ -0,0 +1,25 @@
+#pragma once
+#include <ATen/cuda/CUDAContext.h>
+#include <torch/extension.h>
+
+#define CHECK_CUDA(x)                                        \
+  do {                                                       \
+    AT_ASSERT(x.is_cuda(), #x " must be a CUDA tensor");     \
+  } while (0)
+
+#define CHECK_CONTIGUOUS(x)                                          \
+  do {                                                               \
+    AT_ASSERT(x.is_contiguous(), #x " must be a contiguous tensor"); \
+  } while (0)
+
+#define CHECK_IS_INT(x)                               \
+  do {                                                \
+    AT_ASSERT(x.scalar_type() == at::ScalarType::Int, \
+              #x " must be an int tensor");           \
+  } while (0)
+
+#define CHECK_IS_FLOAT(x)                               \
+  do {                                                  \
+    AT_ASSERT(x.scalar_type() == at::ScalarType::Float, \
+              #x " must be a float tensor");            \
+  } while (0)
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/ball_query.cpp b/models/Scanrefer/lib/pointnet2/_ext_src/src/ball_query.cpp
new file mode 100644
index 0000000..b1797c1
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/ball_query.cpp
@@ -0,0 +1,32 @@
+#include "ball_query.h"
+#include "utils.h"
+
+void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
+                                     int nsample, const float *new_xyz,
+                                     const float *xyz, int *idx);
+
+at::Tensor ball_query(at::Tensor new_xyz, at::Tensor xyz, const float radius,
+                      const int nsample) {
+  CHECK_CONTIGUOUS(new_xyz);
+  CHECK_CONTIGUOUS(xyz);
+  CHECK_IS_FLOAT(new_xyz);
+  CHECK_IS_FLOAT(xyz);
+
+  if (new_xyz.is_cuda()) {
+    CHECK_CUDA(xyz);
+  }
+
+  at::Tensor idx =
+      torch::zeros({new_xyz.size(0), new_xyz.size(1), nsample},
+                   at::device(new_xyz.device()).dtype(at::ScalarType::Int));
+
+  if (new_xyz.is_cuda()) {
+    query_ball_point_kernel_wrapper(xyz.size(0), xyz.size(1), new_xyz.size(1),
+                                    radius, nsample, new_xyz.data_ptr<float>(),
+                                    xyz.data_ptr<float>(),
+                                    idx.data_ptr<int>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return idx;
+}
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/ball_query_gpu.cu b/models/Scanrefer/lib/pointnet2/_ext_src/src/ball_query_gpu.cu
new file mode 100644
index 0000000..559aef9
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/ball_query_gpu.cu
@@ -0,0 +1,54 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: new_xyz(b, m, 3) xyz(b, n, 3)
+// output: idx(b, m, nsample)
+__global__ void query_ball_point_kernel(int b, int n, int m, float radius,
+                                        int nsample,
+                                        const float *__restrict__ new_xyz,
+                                        const float *__restrict__ xyz,
+                                        int *__restrict__ idx) {
+  int batch_index = blockIdx.x;
+  xyz += batch_index * n * 3;
+  new_xyz += batch_index * m * 3;
+  idx += m * nsample * batch_index;
+
+  int index = threadIdx.x;
+  int stride = blockDim.x;
+
+  float radius2 = radius * radius;
+  for (int j = index; j < m; j += stride) {
+    float new_x = new_xyz[j * 3 + 0];
+    float new_y = new_xyz[j * 3 + 1];
+    float new_z = new_xyz[j * 3 + 2];
+    for (int k = 0, cnt = 0; k < n && cnt < nsample; ++k) {
+      float x = xyz[k * 3 + 0];
+      float y = xyz[k * 3 + 1];
+      float z = xyz[k * 3 + 2];
+      float d2 = (new_x - x) * (new_x - x) + (new_y - y) * (new_y - y) +
+                 (new_z - z) * (new_z - z);
+      if (d2 < radius2) {
+        if (cnt == 0) {
+          for (int l = 0; l < nsample; ++l) {
+            idx[j * nsample + l] = k;
+          }
+        }
+        idx[j * nsample + cnt] = k;
+        ++cnt;
+      }
+    }
+  }
+}
+
+void query_ball_point_kernel_wrapper(int b, int n, int m, float radius,
+                                     int nsample, const float *new_xyz,
+                                     const float *xyz, int *idx) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  query_ball_point_kernel<<<b, opt_n_threads(m), 0, stream>>>(
+      b, n, m, radius, nsample, new_xyz, xyz, idx);
+
+  CUDA_CHECK_ERRORS();
+}
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/bindings.cpp b/models/Scanrefer/lib/pointnet2/_ext_src/src/bindings.cpp
new file mode 100644
index 0000000..d1916ce
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/bindings.cpp
@@ -0,0 +1,19 @@
+#include "ball_query.h"
+#include "group_points.h"
+#include "interpolate.h"
+#include "sampling.h"
+
+PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
+  m.def("gather_points", &gather_points);
+  m.def("gather_points_grad", &gather_points_grad);
+  m.def("furthest_point_sampling", &furthest_point_sampling);
+
+  m.def("three_nn", &three_nn);
+  m.def("three_interpolate", &three_interpolate);
+  m.def("three_interpolate_grad", &three_interpolate_grad);
+
+  m.def("ball_query", &ball_query);
+
+  m.def("group_points", &group_points);
+  m.def("group_points_grad", &group_points_grad);
+}
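The build script for this extension is not part of this hunk. A hypothetical `setup.py` for compiling sources laid out like the above with `torch.utils.cpp_extension` (paths assume it is run from the `pointnet2` directory; the repo's actual script may differ):

```python
# Hypothetical build script; illustrative, not the repository's own.
import glob

from setuptools import setup
from torch.utils.cpp_extension import BuildExtension, CUDAExtension

_ext_sources = glob.glob('_ext_src/src/*.cpp') + glob.glob('_ext_src/src/*.cu')

setup(
    name='pointnet2',
    ext_modules=[
        CUDAExtension(
            name='pointnet2._ext',          # imported below as pointnet2._ext
            sources=_ext_sources,
            include_dirs=['_ext_src/include'],
        )
    ],
    cmdclass={'build_ext': BuildExtension},
)
```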
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/group_points.cpp b/models/Scanrefer/lib/pointnet2/_ext_src/src/group_points.cpp
new file mode 100644
index 0000000..285a4bd
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/group_points.cpp
@@ -0,0 +1,62 @@
+#include "group_points.h"
+#include "utils.h"
+
+void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
+                                 const float *points, const int *idx,
+                                 float *out);
+
+void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
+                                      int nsample, const float *grad_out,
+                                      const int *idx, float *grad_points);
+
+at::Tensor group_points(at::Tensor points, at::Tensor idx) {
+  CHECK_CONTIGUOUS(points);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_IS_FLOAT(points);
+  CHECK_IS_INT(idx);
+
+  if (points.is_cuda()) {
+    CHECK_CUDA(idx);
+  }
+
+  at::Tensor output =
+      torch::zeros({points.size(0), points.size(1), idx.size(1), idx.size(2)},
+                   at::device(points.device()).dtype(at::ScalarType::Float));
+
+  if (points.is_cuda()) {
+    group_points_kernel_wrapper(points.size(0), points.size(1), points.size(2),
+                                idx.size(1), idx.size(2),
+                                points.data_ptr<float>(), idx.data_ptr<int>(),
+                                output.data_ptr<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
+
+at::Tensor group_points_grad(at::Tensor grad_out, at::Tensor idx, const int n) {
+  CHECK_CONTIGUOUS(grad_out);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_IS_FLOAT(grad_out);
+  CHECK_IS_INT(idx);
+
+  if (grad_out.is_cuda()) {
+    CHECK_CUDA(idx);
+  }
+
+  at::Tensor output =
+      torch::zeros({grad_out.size(0), grad_out.size(1), n},
+                   at::device(grad_out.device()).dtype(at::ScalarType::Float));
+
+  if (grad_out.is_cuda()) {
+    group_points_grad_kernel_wrapper(
+        grad_out.size(0), grad_out.size(1), n, idx.size(1), idx.size(2),
+        grad_out.data_ptr<float>(), idx.data_ptr<int>(),
+        output.data_ptr<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/group_points_gpu.cu b/models/Scanrefer/lib/pointnet2/_ext_src/src/group_points_gpu.cu
new file mode 100644
index 0000000..57c2b1b
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/group_points_gpu.cu
@@ -0,0 +1,75 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: points(b, c, n) idx(b, npoints, nsample)
+// output: out(b, c, npoints, nsample)
+__global__ void group_points_kernel(int b, int c, int n, int npoints,
+                                    int nsample,
+                                    const float *__restrict__ points,
+                                    const int *__restrict__ idx,
+                                    float *__restrict__ out) {
+  int batch_index = blockIdx.x;
+  points += batch_index * n * c;
+  idx += batch_index * npoints * nsample;
+  out += batch_index * npoints * nsample * c;
+
+  const int index = threadIdx.y * blockDim.x + threadIdx.x;
+  const int stride = blockDim.y * blockDim.x;
+  for (int i = index; i < c * npoints; i += stride) {
+    const int l = i / npoints;
+    const int j = i % npoints;
+    for (int k = 0; k < nsample; ++k) {
+      int ii = idx[j * nsample + k];
+      out[(l * npoints + j) * nsample + k] = points[l * n + ii];
+    }
+  }
+}
+
+void group_points_kernel_wrapper(int b, int c, int n, int npoints, int nsample,
+                                 const float *points, const int *idx,
+                                 float *out) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  group_points_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
+      b, c, n, npoints, nsample, points, idx, out);
+
+  CUDA_CHECK_ERRORS();
+}
+
+// input: grad_out(b, c, npoints, nsample), idx(b, npoints, nsample)
+// output: grad_points(b, c, n)
+__global__ void group_points_grad_kernel(int b, int c, int n, int npoints,
+                                         int nsample,
+                                         const float *__restrict__ grad_out,
+                                         const int *__restrict__ idx,
+                                         float *__restrict__ grad_points) {
+  int batch_index = blockIdx.x;
+  grad_out += batch_index * npoints * nsample * c;
+  idx += batch_index * npoints * nsample;
+  grad_points += batch_index * n * c;
+
+  const int index = threadIdx.y * blockDim.x + threadIdx.x;
+  const int stride = blockDim.y * blockDim.x;
+  for (int i = index; i < c * npoints; i += stride) {
+    const int l = i / npoints;
+    const int j = i % npoints;
+    for (int k = 0; k < nsample; ++k) {
+      int ii = idx[j * nsample + k];
+      atomicAdd(grad_points + l * n + ii,
+                grad_out[(l * npoints + j) * nsample + k]);
+    }
+  }
+}
+
+void group_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
+                                      int nsample, const float *grad_out,
+                                      const int *idx, float *grad_points) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  group_points_grad_kernel<<<b, opt_block_config(npoints, c), 0, stream>>>(
+      b, c, n, npoints, nsample, grad_out, idx, grad_points);
+
+  CUDA_CHECK_ERRORS();
+}
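The backward kernel accumulates with `atomicAdd` because several grouped slots can reference the same source point, so their gradients must sum. In plain PyTorch the same scatter-add is `index_add_` (or `scatter_add_`); a small sketch of the equivalence:

```python
import torch

# grad_out: gradients flowing into the gathered copies,
# idx: which of the N source points each copy came from.
C, N, npoints = 4, 10, 6
grad_out = torch.randn(C, npoints)
idx = torch.randint(0, N, (npoints,))

grad_points = torch.zeros(C, N)
grad_points.index_add_(1, idx, grad_out)  # duplicate indices accumulate, like atomicAdd
```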
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/interpolate.cpp b/models/Scanrefer/lib/pointnet2/_ext_src/src/interpolate.cpp
new file mode 100644
index 0000000..cdee31c
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/interpolate.cpp
@@ -0,0 +1,99 @@
+#include "interpolate.h"
+#include "utils.h"
+
+void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
+                             const float *known, float *dist2, int *idx);
+void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
+                                      const float *points, const int *idx,
+                                      const float *weight, float *out);
+void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
+                                           const float *grad_out,
+                                           const int *idx, const float *weight,
+                                           float *grad_points);
+
+std::vector<at::Tensor> three_nn(at::Tensor unknowns, at::Tensor knows) {
+  CHECK_CONTIGUOUS(unknowns);
+  CHECK_CONTIGUOUS(knows);
+  CHECK_IS_FLOAT(unknowns);
+  CHECK_IS_FLOAT(knows);
+
+  if (unknowns.is_cuda()) {
+    CHECK_CUDA(knows);
+  }
+
+  at::Tensor idx =
+      torch::zeros({unknowns.size(0), unknowns.size(1), 3},
+                   at::device(unknowns.device()).dtype(at::ScalarType::Int));
+  at::Tensor dist2 =
+      torch::zeros({unknowns.size(0), unknowns.size(1), 3},
+                   at::device(unknowns.device()).dtype(at::ScalarType::Float));
+
+  if (unknowns.is_cuda()) {
+    three_nn_kernel_wrapper(unknowns.size(0), unknowns.size(1), knows.size(1),
+                            unknowns.data_ptr<float>(),
+                            knows.data_ptr<float>(), dist2.data_ptr<float>(),
+                            idx.data_ptr<int>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return {dist2, idx};
+}
+
+at::Tensor three_interpolate(at::Tensor points, at::Tensor idx,
+                             at::Tensor weight) {
+  CHECK_CONTIGUOUS(points);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_CONTIGUOUS(weight);
+  CHECK_IS_FLOAT(points);
+  CHECK_IS_INT(idx);
+  CHECK_IS_FLOAT(weight);
+
+  if (points.is_cuda()) {
+    CHECK_CUDA(idx);
+    CHECK_CUDA(weight);
+  }
+
+  at::Tensor output =
+      torch::zeros({points.size(0), points.size(1), idx.size(1)},
+                   at::device(points.device()).dtype(at::ScalarType::Float));
+
+  if (points.is_cuda()) {
+    three_interpolate_kernel_wrapper(
+        points.size(0), points.size(1), points.size(2), idx.size(1),
+        points.data_ptr<float>(), idx.data_ptr<int>(),
+        weight.data_ptr<float>(), output.data_ptr<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
+
+at::Tensor three_interpolate_grad(at::Tensor grad_out, at::Tensor idx,
+                                  at::Tensor weight, const int m) {
+  CHECK_CONTIGUOUS(grad_out);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_CONTIGUOUS(weight);
+  CHECK_IS_FLOAT(grad_out);
+  CHECK_IS_INT(idx);
+  CHECK_IS_FLOAT(weight);
+
+  if (grad_out.is_cuda()) {
+    CHECK_CUDA(idx);
+    CHECK_CUDA(weight);
+  }
+
+  at::Tensor output =
+      torch::zeros({grad_out.size(0), grad_out.size(1), m},
+                   at::device(grad_out.device()).dtype(at::ScalarType::Float));
+
+  if (grad_out.is_cuda()) {
+    three_interpolate_grad_kernel_wrapper(
+        grad_out.size(0), grad_out.size(1), grad_out.size(2), m,
+        grad_out.data_ptr<float>(), idx.data_ptr<int>(),
+        weight.data_ptr<float>(), output.data_ptr<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/interpolate_gpu.cu b/models/Scanrefer/lib/pointnet2/_ext_src/src/interpolate_gpu.cu
new file mode 100644
index 0000000..81c5548
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/interpolate_gpu.cu
@@ -0,0 +1,154 @@
+#include <math.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: unknown(b, n, 3) known(b, m, 3)
+// output: dist2(b, n, 3), idx(b, n, 3)
+__global__ void three_nn_kernel(int b, int n, int m,
+                                const float *__restrict__ unknown,
+                                const float *__restrict__ known,
+                                float *__restrict__ dist2,
+                                int *__restrict__ idx) {
+  int batch_index = blockIdx.x;
+  unknown += batch_index * n * 3;
+  known += batch_index * m * 3;
+  dist2 += batch_index * n * 3;
+  idx += batch_index * n * 3;
+
+  int index = threadIdx.x;
+  int stride = blockDim.x;
+  for (int j = index; j < n; j += stride) {
+    float ux = unknown[j * 3 + 0];
+    float uy = unknown[j * 3 + 1];
+    float uz = unknown[j * 3 + 2];
+
+    double best1 = 1e40, best2 = 1e40, best3 = 1e40;
+    int besti1 = 0, besti2 = 0, besti3 = 0;
+    for (int k = 0; k < m; ++k) {
+      float x = known[k * 3 + 0];
+      float y = known[k * 3 + 1];
+      float z = known[k * 3 + 2];
+      float d = (ux - x) * (ux - x) + (uy - y) * (uy - y) + (uz - z) * (uz - z);
+      if (d < best1) {
+        best3 = best2;
+        besti3 = besti2;
+        best2 = best1;
+        besti2 = besti1;
+        best1 = d;
+        besti1 = k;
+      } else if (d < best2) {
+        best3 = best2;
+        besti3 = besti2;
+        best2 = d;
+        besti2 = k;
+      } else if (d < best3) {
+        best3 = d;
+        besti3 = k;
+      }
+    }
+    dist2[j * 3 + 0] = best1;
+    dist2[j * 3 + 1] = best2;
+    dist2[j * 3 + 2] = best3;
+
+    idx[j * 3 + 0] = besti1;
+    idx[j * 3 + 1] = besti2;
+    idx[j * 3 + 2] = besti3;
+  }
+}
+
+void three_nn_kernel_wrapper(int b, int n, int m, const float *unknown,
+                             const float *known, float *dist2, int *idx) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  three_nn_kernel<<<b, opt_n_threads(n), 0, stream>>>(b, n, m, unknown, known,
+                                                      dist2, idx);
+
+  CUDA_CHECK_ERRORS();
+}
+
+// input: points(b, c, m), idx(b, n, 3), weight(b, n, 3)
+// output: out(b, c, n)
+__global__ void three_interpolate_kernel(int b, int c, int m, int n,
+                                         const float *__restrict__ points,
+                                         const int *__restrict__ idx,
+                                         const float *__restrict__ weight,
+                                         float *__restrict__ out) {
+  int batch_index = blockIdx.x;
+  points += batch_index * m * c;
+
+  idx += batch_index * n * 3;
+  weight += batch_index * n * 3;
+
+  out += batch_index * n * c;
+
+  const int index = threadIdx.y * blockDim.x + threadIdx.x;
+  const int stride = blockDim.y * blockDim.x;
+  for (int i = index; i < c * n; i += stride) {
+    const int l = i / n;
+    const int j = i % n;
+    float w1 = weight[j * 3 + 0];
+    float w2 = weight[j * 3 + 1];
+    float w3 = weight[j * 3 + 2];
+
+    int i1 = idx[j * 3 + 0];
+    int i2 = idx[j * 3 + 1];
+    int i3 = idx[j * 3 + 2];
+
+    out[i] = points[l * m + i1] * w1 + points[l * m + i2] * w2 +
+             points[l * m + i3] * w3;
+  }
+}
+
+void three_interpolate_kernel_wrapper(int b, int c, int m, int n,
+                                      const float *points, const int *idx,
+                                      const float *weight, float *out) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  three_interpolate_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
+      b, c, m, n, points, idx, weight, out);
+
+  CUDA_CHECK_ERRORS();
+}
+
+// input: grad_out(b, c, n), idx(b, n, 3), weight(b, n, 3)
+// output: grad_points(b, c, m)
+
+__global__ void three_interpolate_grad_kernel(
+    int b, int c, int n, int m, const float *__restrict__ grad_out,
+    const int *__restrict__ idx, const float *__restrict__ weight,
+    float *__restrict__ grad_points) {
+  int batch_index = blockIdx.x;
+  grad_out += batch_index * n * c;
+  idx += batch_index * n * 3;
+  weight += batch_index * n * 3;
+  grad_points += batch_index * m * c;
+
+  const int index = threadIdx.y * blockDim.x + threadIdx.x;
+  const int stride = blockDim.y * blockDim.x;
+  for (int i = index; i < c * n; i += stride) {
+    const int l = i / n;
+    const int j = i % n;
+    float w1 = weight[j * 3 + 0];
+    float w2 = weight[j * 3 + 1];
+    float w3 = weight[j * 3 + 2];
+
+    int i1 = idx[j * 3 + 0];
+    int i2 = idx[j * 3 + 1];
+    int i3 = idx[j * 3 + 2];
+
+    atomicAdd(grad_points + l * m + i1, grad_out[i] * w1);
+    atomicAdd(grad_points + l * m + i2, grad_out[i] * w2);
+    atomicAdd(grad_points + l * m + i3, grad_out[i] * w3);
+  }
+}
+
+void three_interpolate_grad_kernel_wrapper(int b, int c, int n, int m,
+                                           const float *grad_out,
+                                           const int *idx, const float *weight,
+                                           float *grad_points) {
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+  three_interpolate_grad_kernel<<<b, opt_block_config(n, c), 0, stream>>>(
+      b, c, n, m, grad_out, idx, weight, grad_points);
+
+  CUDA_CHECK_ERRORS();
+}
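Together, `three_nn` and `three_interpolate` implement inverse-distance-weighted upsampling: each unknown point takes a weighted sum of its three nearest known points with weights w_i = (1/d_i) / sum_j (1/d_j). A pure-PyTorch sketch of the combined operation (illustrative; `three_nn_interpolate` is not a name from this repo):

```python
import torch

def three_nn_interpolate(unknown, known, known_feats, eps=1e-8):
    """unknown: (B, n, 3), known: (B, m, 3), known_feats: (B, C, m) -> (B, C, n)."""
    d = torch.cdist(unknown, known)               # (B, n, m)
    dist, idx = d.topk(3, dim=-1, largest=False)  # three nearest, (B, n, 3)
    w = 1.0 / (dist + eps)
    w = w / w.sum(-1, keepdim=True)               # inverse-distance weights
    B, C, _ = known_feats.shape
    gathered = torch.gather(
        known_feats.unsqueeze(2).expand(B, C, idx.size(1), -1), 3,
        idx.unsqueeze(1).expand(B, C, -1, -1))    # (B, C, n, 3)
    return (gathered * w.unsqueeze(1)).sum(-1)
```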
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/sampling.cpp b/models/Scanrefer/lib/pointnet2/_ext_src/src/sampling.cpp
new file mode 100644
index 0000000..ddbdc11
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/sampling.cpp
@@ -0,0 +1,87 @@
+#include "sampling.h"
+#include "utils.h"
+
+void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
+                                  const float *points, const int *idx,
+                                  float *out);
+void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
+                                       const float *grad_out, const int *idx,
+                                       float *grad_points);
+
+void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
+                                            const float *dataset, float *temp,
+                                            int *idxs);
+
+at::Tensor gather_points(at::Tensor points, at::Tensor idx) {
+  CHECK_CONTIGUOUS(points);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_IS_FLOAT(points);
+  CHECK_IS_INT(idx);
+
+  if (points.is_cuda()) {
+    CHECK_CUDA(idx);
+  }
+
+  at::Tensor output =
+      torch::zeros({points.size(0), points.size(1), idx.size(1)},
+                   at::device(points.device()).dtype(at::ScalarType::Float));
+
+  if (points.is_cuda()) {
+    gather_points_kernel_wrapper(points.size(0), points.size(1),
+                                 points.size(2), idx.size(1),
+                                 points.data_ptr<float>(), idx.data_ptr<int>(),
+                                 output.data_ptr<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
+
+at::Tensor gather_points_grad(at::Tensor grad_out, at::Tensor idx,
+                              const int n) {
+  CHECK_CONTIGUOUS(grad_out);
+  CHECK_CONTIGUOUS(idx);
+  CHECK_IS_FLOAT(grad_out);
+  CHECK_IS_INT(idx);
+
+  if (grad_out.is_cuda()) {
+    CHECK_CUDA(idx);
+  }
+
+  at::Tensor output =
+      torch::zeros({grad_out.size(0), grad_out.size(1), n},
+                   at::device(grad_out.device()).dtype(at::ScalarType::Float));
+
+  if (grad_out.is_cuda()) {
+    gather_points_grad_kernel_wrapper(grad_out.size(0), grad_out.size(1), n,
+                                      idx.size(1), grad_out.data_ptr<float>(),
+                                      idx.data_ptr<int>(),
+                                      output.data_ptr<float>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
+
+at::Tensor furthest_point_sampling(at::Tensor points, const int nsamples) {
+  CHECK_CONTIGUOUS(points);
+  CHECK_IS_FLOAT(points);
+
+  at::Tensor output =
+      torch::zeros({points.size(0), nsamples},
+                   at::device(points.device()).dtype(at::ScalarType::Int));
+
+  at::Tensor tmp =
+      torch::full({points.size(0), points.size(1)}, 1e10,
+                  at::device(points.device()).dtype(at::ScalarType::Float));
+
+  if (points.is_cuda()) {
+    furthest_point_sampling_kernel_wrapper(
+        points.size(0), points.size(1), nsamples, points.data_ptr<float>(),
+        tmp.data_ptr<float>(), output.data_ptr<int>());
+  } else {
+    AT_ASSERT(false, "CPU not supported");
+  }
+
+  return output;
+}
diff --git a/models/Scanrefer/lib/pointnet2/_ext_src/src/sampling_gpu.cu b/models/Scanrefer/lib/pointnet2/_ext_src/src/sampling_gpu.cu
new file mode 100644
index 0000000..fc573f0
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_ext_src/src/sampling_gpu.cu
@@ -0,0 +1,229 @@
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "cuda_utils.h"
+
+// input: points(b, c, n) idx(b, m)
+// output: out(b, c, m)
+__global__ void gather_points_kernel(int b, int c, int n, int m,
+                                     const float *__restrict__ points,
+                                     const int *__restrict__ idx,
+                                     float *__restrict__ out) {
+  for (int i = blockIdx.x; i < b; i += gridDim.x) {
+    for (int l = blockIdx.y; l < c; l += gridDim.y) {
+      for (int j = threadIdx.x; j < m; j += blockDim.x) {
+        int a = idx[i * m + j];
+        out[(i * c + l) * m + j] = points[(i * c + l) * n + a];
+      }
+    }
+  }
+}
+
+void gather_points_kernel_wrapper(int b, int c, int n, int npoints,
+                                  const float *points, const int *idx,
+                                  float *out) {
+  gather_points_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
+                         at::cuda::getCurrentCUDAStream()>>>(b, c, n, npoints,
+                                                             points, idx, out);
+
+  CUDA_CHECK_ERRORS();
+}
+
+// input: grad_out(b, c, m) idx(b, m)
+// output: grad_points(b, c, n)
+__global__ void gather_points_grad_kernel(int b, int c, int n, int m,
+                                          const float *__restrict__ grad_out,
+                                          const int *__restrict__ idx,
+                                          float *__restrict__ grad_points) {
+  for (int i = blockIdx.x; i < b; i += gridDim.x) {
+    for (int l = blockIdx.y; l < c; l += gridDim.y) {
+      for (int j = threadIdx.x; j < m; j += blockDim.x) {
+        int a = idx[i * m + j];
+        atomicAdd(grad_points + (i * c + l) * n + a,
+                  grad_out[(i * c + l) * m + j]);
+      }
+    }
+  }
+}
+
+void gather_points_grad_kernel_wrapper(int b, int c, int n, int npoints,
+                                       const float *grad_out, const int *idx,
+                                       float *grad_points) {
+  gather_points_grad_kernel<<<dim3(b, c, 1), opt_n_threads(npoints), 0,
+                              at::cuda::getCurrentCUDAStream()>>>(
+      b, c, n, npoints, grad_out, idx, grad_points);
+
+  CUDA_CHECK_ERRORS();
+}
+
+__device__ void __update(float *__restrict__ dists, int *__restrict__ dists_i,
+                         int idx1, int idx2) {
+  const float v1 = dists[idx1], v2 = dists[idx2];
+  const int i1 = dists_i[idx1], i2 = dists_i[idx2];
+  dists[idx1] = max(v1, v2);
+  dists_i[idx1] = v2 > v1 ? i2 : i1;
+}
+
+// Input dataset: (b, n, 3), tmp: (b, n)
+// Output idxs (b, m)
+template <unsigned int block_size>
+__global__ void furthest_point_sampling_kernel(
+    int b, int n, int m, const float *__restrict__ dataset,
+    float *__restrict__ temp, int *__restrict__ idxs) {
+  if (m <= 0) return;
+  __shared__ float dists[block_size];
+  __shared__ int dists_i[block_size];
+
+  int batch_index = blockIdx.x;
+  dataset += batch_index * n * 3;
+  temp += batch_index * n;
+  idxs += batch_index * m;
+
+  int tid = threadIdx.x;
+  const int stride = block_size;
+
+  int old = 0;
+  if (threadIdx.x == 0) idxs[0] = old;
+
+  __syncthreads();
+  for (int j = 1; j < m; j++) {
+    int besti = 0;
+    float best = -1;
+    float x1 = dataset[old * 3 + 0];
+    float y1 = dataset[old * 3 + 1];
+    float z1 = dataset[old * 3 + 2];
+    for (int k = tid; k < n; k += stride) {
+      float x2, y2, z2;
+      x2 = dataset[k * 3 + 0];
+      y2 = dataset[k * 3 + 1];
+      z2 = dataset[k * 3 + 2];
+      float mag = (x2 * x2) + (y2 * y2) + (z2 * z2);
+      if (mag <= 1e-3) continue;
+
+      float d =
+          (x2 - x1) * (x2 - x1) + (y2 - y1) * (y2 - y1) + (z2 - z1) * (z2 - z1);
+
+      float d2 = min(d, temp[k]);
+      temp[k] = d2;
+      besti = d2 > best ? k : besti;
+      best = d2 > best ? d2 : best;
+    }
+    dists[tid] = best;
+    dists_i[tid] = besti;
+    __syncthreads();
+
+    if (block_size >= 512) {
+      if (tid < 256) {
+        __update(dists, dists_i, tid, tid + 256);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 256) {
+      if (tid < 128) {
+        __update(dists, dists_i, tid, tid + 128);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 128) {
+      if (tid < 64) {
+        __update(dists, dists_i, tid, tid + 64);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 64) {
+      if (tid < 32) {
+        __update(dists, dists_i, tid, tid + 32);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 32) {
+      if (tid < 16) {
+        __update(dists, dists_i, tid, tid + 16);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 16) {
+      if (tid < 8) {
+        __update(dists, dists_i, tid, tid + 8);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 8) {
+      if (tid < 4) {
+        __update(dists, dists_i, tid, tid + 4);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 4) {
+      if (tid < 2) {
+        __update(dists, dists_i, tid, tid + 2);
+      }
+      __syncthreads();
+    }
+    if (block_size >= 2) {
+      if (tid < 1) {
+        __update(dists, dists_i, tid, tid + 1);
+      }
+      __syncthreads();
+    }
+
+    old = dists_i[0];
+    if (tid == 0) idxs[j] = old;
+  }
+}
+
+void furthest_point_sampling_kernel_wrapper(int b, int n, int m,
+                                            const float *dataset, float *temp,
+                                            int *idxs) {
+  unsigned int n_threads = opt_n_threads(n);
+
+  cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+  switch (n_threads) {
+    case 512:
+      furthest_point_sampling_kernel<512>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 256:
+      furthest_point_sampling_kernel<256>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 128:
+      furthest_point_sampling_kernel<128>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 64:
+      furthest_point_sampling_kernel<64>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 32:
+      furthest_point_sampling_kernel<32>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 16:
+      furthest_point_sampling_kernel<16>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 8:
+      furthest_point_sampling_kernel<8>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 4:
+      furthest_point_sampling_kernel<4>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 2:
+      furthest_point_sampling_kernel<2>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    case 1:
+      furthest_point_sampling_kernel<1>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+      break;
+    default:
+      furthest_point_sampling_kernel<512>
+          <<<b, n_threads, 0, stream>>>(b, n, m, dataset, temp, idxs);
+  }
+
+  CUDA_CHECK_ERRORS();
+}
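The kernel implements greedy farthest point sampling: pick point 0, then repeatedly pick the point whose min-distance to the selected set (maintained in `temp`) is largest, using a shared-memory tree reduction to find the per-block argmax. A single-cloud pure-PyTorch reference of the same greedy loop (without the kernel's near-origin skip):

```python
import torch

def farthest_point_sample_ref(xyz: torch.Tensor, m: int) -> torch.Tensor:
    """xyz: (N, 3) -> (m,) indices; mirrors the kernel's greedy loop."""
    N = xyz.size(0)
    idxs = torch.zeros(m, dtype=torch.long)
    mind = torch.full((N,), float('inf'))   # running min-distance, like `temp`
    old = 0                                  # start from point 0
    for j in range(1, m):
        mind = torch.minimum(mind, ((xyz - xyz[old]) ** 2).sum(-1))
        old = int(mind.argmax())             # farthest from the selected set
        idxs[j] = old
    return idxs
```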
diff --git a/models/Scanrefer/lib/pointnet2/_version.py b/models/Scanrefer/lib/pointnet2/_version.py
new file mode 100644
index 0000000..4eb28e3
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/_version.py
@@ -0,0 +1 @@
+__version__ = '3.0.0'
diff --git a/models/Scanrefer/lib/pointnet2/pointnet2_modules.py b/models/Scanrefer/lib/pointnet2/pointnet2_modules.py
new file mode 100644
index 0000000..2631d57
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/pointnet2_modules.py
@@ -0,0 +1,526 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.
+"""Pointnet2 layers.
+
+Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch
+Extended with the following:
+1. Uniform sampling in each local region (sample_uniformly)
+2. Return sampled points indices to support votenet.
+"""
+import os
+import sys
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+
+from typing import List
+
+import pointnet2_utils
+import pytorch_utils as pt_utils
+
+
+class _PointnetSAModuleBase(nn.Module):
+
+    def __init__(self):
+        super().__init__()
+        self.npoint = None
+        self.groupers = None
+        self.mlps = None
+
+    def forward(self,
+                xyz: torch.Tensor,
+                features: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            (B, N, 3) tensor of the xyz coordinates of the features
+        features : torch.Tensor
+            (B, C, N) tensor of the descriptors of the features
+
+        Returns
+        -------
+        new_xyz : torch.Tensor
+            (B, npoint, 3) tensor of the new features' xyz
+        new_features : torch.Tensor
+            (B, npoint, \sum_k(mlps[k][-1])) tensor of the new_features descriptors
+        """
+
+        new_features_list = []
+
+        xyz_flipped = xyz.transpose(1, 2).contiguous()
+        new_xyz = pointnet2_utils.gather_operation(
+            xyz_flipped, pointnet2_utils.furthest_point_sample(
+                xyz, self.npoint)).transpose(
+                    1, 2).contiguous() if self.npoint is not None else None
+
+        for i in range(len(self.groupers)):
+            new_features = self.groupers[i](
+                xyz, new_xyz, features)  # (B, C, npoint, nsample)
+
+            new_features = self.mlps[i](
+                new_features)  # (B, mlp[-1], npoint, nsample)
+            new_features = F.max_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
+
+            new_features_list.append(new_features)
+
+        return new_xyz, torch.cat(new_features_list, dim=1)
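The `[1, nsample]`-kernel max-pool above collapses each local neighborhood to a single vector per center; it is equivalent to a plain `.max()` over the last dimension:

```python
import torch
import torch.nn.functional as F

feat = torch.randn(2, 64, 128, 32)  # (B, C, npoint, nsample)
a = F.max_pool2d(feat, kernel_size=[1, feat.size(3)]).squeeze(-1)
b = feat.max(dim=-1).values         # same (B, C, npoint) result
assert torch.allclose(a, b)
```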
+
+
+class PointnetSAModuleMSG(_PointnetSAModuleBase):
+    r"""Pointnet set abstraction layer with multiscale grouping
+
+    Parameters
+    ----------
+    npoint : int
+        Number of features
+    radii : list of float32
+        list of radii to group with
+    nsamples : list of int32
+        Number of samples in each ball query
+    mlps : list of list of int32
+        Spec of the pointnet before the global max_pool for each scale
+    bn : bool
+        Use batchnorm
+    """
+
+    def __init__(self,
+                 *,
+                 npoint: int,
+                 radii: List[float],
+                 nsamples: List[int],
+                 mlps: List[List[int]],
+                 bn: bool = True,
+                 use_xyz: bool = True,
+                 sample_uniformly: bool = False):
+        super().__init__()
+
+        assert len(radii) == len(nsamples) == len(mlps)
+
+        self.npoint = npoint
+        self.groupers = nn.ModuleList()
+        self.mlps = nn.ModuleList()
+        for i in range(len(radii)):
+            radius = radii[i]
+            nsample = nsamples[i]
+            self.groupers.append(
+                pointnet2_utils.QueryAndGroup(radius,
+                                              nsample,
+                                              use_xyz=use_xyz,
+                                              sample_uniformly=sample_uniformly
+                                              )
+                if npoint is not None else pointnet2_utils.GroupAll(use_xyz))
+            mlp_spec = mlps[i]
+            if use_xyz:
+                mlp_spec[0] += 3
+
+            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
+
+
+class PointnetSAModule(PointnetSAModuleMSG):
+    r"""Pointnet set abstraction layer
+
+    Parameters
+    ----------
+    npoint : int
+        Number of features
+    radius : float
+        Radius of ball
+    nsample : int
+        Number of samples in the ball query
+    mlp : list
+        Spec of the pointnet before the global max_pool
+    bn : bool
+        Use batchnorm
+    """
+
+    def __init__(self,
+                 *,
+                 mlp: List[int],
+                 npoint: int = None,
+                 radius: float = None,
+                 nsample: int = None,
+                 bn: bool = True,
+                 use_xyz: bool = True):
+        super().__init__(mlps=[mlp],
+                         npoint=npoint,
+                         radii=[radius],
+                         nsamples=[nsample],
+                         bn=bn,
+                         use_xyz=use_xyz)
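A hypothetical usage sketch for the MSG module (shapes follow the docstrings; requires the compiled `_ext` op and a GPU):

```python
import torch

sa = PointnetSAModuleMSG(
    npoint=512,                  # number of FPS-sampled centers
    radii=[0.2, 0.4],            # two grouping scales
    nsamples=[16, 32],
    mlps=[[64, 64], [64, 128]],  # first width is C; +3 is added when use_xyz
).cuda()

xyz = torch.randn(8, 1024, 3).cuda()
features = torch.randn(8, 64, 1024).cuda()       # (B, C, N)
new_xyz, new_features = sa(xyz, features)        # (8, 512, 3), (8, 64 + 128, 512)
```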
+
+
+class PointnetSAModuleVotes(nn.Module):
+    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG with
+    extra support for returning point indices for getting their GT votes."""
+
+    def __init__(
+            self,
+            *,
+            mlp: List[int],
+            npoint: int = None,
+            radius: float = None,
+            nsample: int = None,
+            bn: bool = True,
+            use_xyz: bool = True,
+            pooling: str = 'max',
+            sigma: float = None,  # for RBF pooling
+            normalize_xyz: bool = False,  # normalize local XYZ with radius
+            sample_uniformly: bool = False,
+            ret_unique_cnt: bool = False):
+        super().__init__()
+
+        self.npoint = npoint
+        self.radius = radius
+        self.nsample = nsample
+        self.pooling = pooling
+        self.mlp_module = None
+        self.use_xyz = use_xyz
+        self.sigma = sigma
+        if self.sigma is None:
+            self.sigma = self.radius / 2
+        self.normalize_xyz = normalize_xyz
+        self.ret_unique_cnt = ret_unique_cnt
+
+        if npoint is not None:
+            self.grouper = pointnet2_utils.QueryAndGroup(
+                radius,
+                nsample,
+                use_xyz=use_xyz,
+                ret_grouped_xyz=True,
+                normalize_xyz=normalize_xyz,
+                sample_uniformly=sample_uniformly,
+                ret_unique_cnt=ret_unique_cnt)
+        else:
+            self.grouper = pointnet2_utils.GroupAll(use_xyz,
+                                                    ret_grouped_xyz=True)
+
+        mlp_spec = mlp
+        if use_xyz and len(mlp_spec) > 0:
+            mlp_spec[0] += 3
+        self.mlp_module = pt_utils.SharedMLP(mlp_spec, bn=bn)
+
+    def forward(self,
+                xyz: torch.Tensor,
+                features: torch.Tensor = None,
+                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            (B, N, 3) tensor of the xyz coordinates of the features
+        features : torch.Tensor
+            (B, C, N) tensor of the descriptors of the features
+        inds : torch.Tensor
+            (B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
+
+        Returns
+        -------
+        new_xyz : torch.Tensor
+            (B, npoint, 3) tensor of the new features' xyz
+        new_features : torch.Tensor
+            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
+        inds: torch.Tensor
+            (B, npoint) tensor of the inds
+        """
+
+        xyz_flipped = xyz.transpose(1, 2).contiguous()
+        if inds is None:
+            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
+        else:
+            assert (inds.shape[1] == self.npoint)
+        new_xyz = pointnet2_utils.gather_operation(
+            xyz_flipped, inds).transpose(
+                1, 2).contiguous() if self.npoint is not None else None
+
+        if not self.ret_unique_cnt:
+            grouped_features, grouped_xyz = self.grouper(
+                xyz, new_xyz, features)  # (B, C, npoint, nsample)
+        else:
+            grouped_features, grouped_xyz, unique_cnt = self.grouper(
+                xyz, new_xyz, features
+            )  # (B, C, npoint, nsample), (B,3,npoint,nsample), (B,npoint)
+
+        new_features = self.mlp_module(
+            grouped_features)  # (B, mlp[-1], npoint, nsample)
+        if self.pooling == 'max':
+            new_features = F.max_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+        elif self.pooling == 'avg':
+            new_features = F.avg_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+        elif self.pooling == 'rbf':
+            # Use radial basis function kernel for weighted sum of features (normalized by nsample and sigma)
+            # Ref: https://en.wikipedia.org/wiki/Radial_basis_function_kernel
+            rbf = torch.exp(-1 * grouped_xyz.pow(2).sum(1, keepdim=False) /
+                            (self.sigma**2) / 2)  # (B, npoint, nsample)
+            new_features = torch.sum(
+                new_features * rbf.unsqueeze(1), -1, keepdim=True) / float(
+                    self.nsample)  # (B, mlp[-1], npoint, 1)
+        new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
+
+        if not self.ret_unique_cnt:
+            return new_xyz, new_features, inds
+        else:
+            return new_xyz, new_features, inds, unique_cnt
+
+
+class PointnetSAModuleMSGVotes(nn.Module):
+    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG with
+    extra support for returning point indices for getting their GT votes."""
+
+    def __init__(self,
+                 *,
+                 mlps: List[List[int]],
+                 npoint: int,
+                 radii: List[float],
+                 nsamples: List[int],
+                 bn: bool = True,
+                 use_xyz: bool = True,
+                 sample_uniformly: bool = False):
+        super().__init__()
+
+        assert (len(mlps) == len(nsamples) == len(radii))
+
+        self.npoint = npoint
+        self.groupers = nn.ModuleList()
+        self.mlps = nn.ModuleList()
+        for i in range(len(radii)):
+            radius = radii[i]
+            nsample = nsamples[i]
+            self.groupers.append(
+                pointnet2_utils.QueryAndGroup(radius,
+                                              nsample,
+                                              use_xyz=use_xyz,
+                                              sample_uniformly=sample_uniformly
+                                              )
+                if npoint is not None else pointnet2_utils.GroupAll(use_xyz))
+            mlp_spec = mlps[i]
+            if use_xyz:
+                mlp_spec[0] += 3
+
+            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
+
+    def forward(self,
+                xyz: torch.Tensor,
+                features: torch.Tensor = None,
+                inds: torch.Tensor = None) -> (torch.Tensor, torch.Tensor):
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            (B, N, 3) tensor of the xyz coordinates of the features
+        features : torch.Tensor
+            (B, C, N) tensor of the descriptors of the features
+        inds : torch.Tensor
+            (B, npoint) tensor that stores index to the xyz points (values in 0-N-1)
+
+        Returns
+        -------
+        new_xyz : torch.Tensor
+            (B, npoint, 3) tensor of the new features' xyz
+        new_features : torch.Tensor
+            (B, \sum_k(mlps[k][-1]), npoint) tensor of the new_features descriptors
+        inds: torch.Tensor
+            (B, npoint) tensor of the inds
+        """
+        new_features_list = []
+
+        xyz_flipped = xyz.transpose(1, 2).contiguous()
+        if inds is None:
+            inds = pointnet2_utils.furthest_point_sample(xyz, self.npoint)
+        new_xyz = pointnet2_utils.gather_operation(
+            xyz_flipped, inds).transpose(
+                1, 2).contiguous() if self.npoint is not None else None
+
+        for i in range(len(self.groupers)):
+            new_features = self.groupers[i](
+                xyz, new_xyz, features)  # (B, C, npoint, nsample)
+            new_features = self.mlps[i](
+                new_features)  # (B, mlp[-1], npoint, nsample)
+            new_features = F.max_pool2d(new_features,
+                                        kernel_size=[
+                                            1, new_features.size(3)
+                                        ])  # (B, mlp[-1], npoint, 1)
+            new_features = new_features.squeeze(-1)  # (B, mlp[-1], npoint)
+
+            new_features_list.append(new_features)
+
+        return new_xyz, torch.cat(new_features_list, dim=1), inds
+
+
+class PointnetFPModule(nn.Module):
+    r"""Propagates the features of one set to another
+
+    Parameters
+    ----------
+    mlp : list
+        Pointnet module parameters
+    bn : bool
+        Use batchnorm
+    """
+
+    def __init__(self, *, mlp: List[int], bn: bool = True):
+        super().__init__()
+        self.mlp = pt_utils.SharedMLP(mlp, bn=bn)
+
+    def forward(self, unknown: torch.Tensor, known: torch.Tensor,
+                unknow_feats: torch.Tensor,
+                known_feats: torch.Tensor) -> torch.Tensor:
+        r"""
+        Parameters
+        ----------
+        unknown : torch.Tensor
+            (B, n, 3) tensor of the xyz positions of the unknown features
+        known : torch.Tensor
+            (B, m, 3) tensor of the xyz positions of the known features
+        unknow_feats : torch.Tensor
+            (B, C1, n) tensor of the features to be propagated to
+        known_feats : torch.Tensor
+            (B, C2, m) tensor of features to be propagated
+
+        Returns
+        -------
+        new_features : torch.Tensor
+            (B, mlp[-1], n) tensor of the features of the unknown features
+        """
+
+        if known is not None:
+            dist, idx = pointnet2_utils.three_nn(unknown, known)
+            dist_recip = 1.0 / (dist + 1e-8)
+            norm = torch.sum(dist_recip, dim=2, keepdim=True)
+            weight = dist_recip / norm
+
+            interpolated_feats = pointnet2_utils.three_interpolate(
+                known_feats, idx, weight)
+        else:
+            interpolated_feats = known_feats.expand(*known_feats.size()[0:2],
+                                                    unknown.size(1))
+
+        if unknow_feats is not None:
+            new_features = torch.cat([interpolated_feats, unknow_feats],
+                                     dim=1)  # (B, C2 + C1, n)
+        else:
+            new_features = interpolated_feats
+
+        new_features = new_features.unsqueeze(-1)
+        new_features = self.mlp(new_features)
+
+        return new_features.squeeze(-1)
+
+
+class PointnetLFPModuleMSG(nn.Module):
+    """Modified based on _PointnetSAModuleBase and PointnetSAModuleMSG
+    learnable feature propagation layer."""
+
+    def __init__(self,
+                 *,
+                 mlps: List[List[int]],
+                 radii: List[float],
+                 nsamples: List[int],
+                 post_mlp: List[int],
+                 bn: bool = True,
+                 use_xyz: bool = True,
+                 sample_uniformly: bool = False):
+        super().__init__()
+
+        assert (len(mlps) == len(nsamples) == len(radii))
+
+        self.post_mlp = pt_utils.SharedMLP(post_mlp, bn=bn)
+
+        self.groupers = nn.ModuleList()
+        self.mlps = nn.ModuleList()
+        for i in range(len(radii)):
+            radius = radii[i]
+            nsample = nsamples[i]
+            self.groupers.append(
+                pointnet2_utils.QueryAndGroup(
+                    radius,
+                    nsample,
+                    use_xyz=use_xyz,
+                    sample_uniformly=sample_uniformly))
+            mlp_spec = mlps[i]
+            if use_xyz:
+                mlp_spec[0] += 3
+
+            self.mlps.append(pt_utils.SharedMLP(mlp_spec, bn=bn))
+
+    def forward(self, xyz2: torch.Tensor, xyz1: torch.Tensor,
+                features2: torch.Tensor,
+                features1: torch.Tensor) -> torch.Tensor:
+        r"""Propagate features from xyz1 to xyz2.
+ Parameters + ---------- + xyz2 : torch.Tensor + (B, N2, 3) tensor of the xyz coordinates of the features + xyz1 : torch.Tensor + (B, N1, 3) tensor of the xyz coordinates of the features + features2 : torch.Tensor + (B, C2, N2) tensor of the descriptors of the the features + features1 : torch.Tensor + (B, C1, N1) tensor of the descriptors of the the features + + Returns + ------- + new_features1 : torch.Tensor + (B, \sum_k(mlps[k][-1]), N1) tensor of the new_features descriptors + """ + new_features_list = [] + + for i in range(len(self.groupers)): + new_features = self.groupers[i](xyz1, xyz2, + features1) # (B, C1, N2, nsample) + new_features = self.mlps[i]( + new_features) # (B, mlp[-1], N2, nsample) + new_features = F.max_pool2d(new_features, + kernel_size=[1, + new_features.size(3) + ]) # (B, mlp[-1], N2, 1) + new_features = new_features.squeeze(-1) # (B, mlp[-1], N2) + + if features2 is not None: + new_features = torch.cat([new_features, features2], + dim=1) #(B, mlp[-1] + C2, N2) + + new_features = new_features.unsqueeze(-1) + new_features = self.post_mlp(new_features) + + new_features_list.append(new_features) + + return torch.cat(new_features_list, dim=1).squeeze(-1) + + +if __name__ == '__main__': + from torch.autograd import Variable + torch.manual_seed(1) + torch.cuda.manual_seed_all(1) + xyz = Variable(torch.randn(2, 9, 3).cuda(), requires_grad=True) + xyz_feats = Variable(torch.randn(2, 9, 6).cuda(), requires_grad=True) + + test_module = PointnetSAModuleMSG(npoint=2, + radii=[5.0, 10.0], + nsamples=[6, 3], + mlps=[[9, 3], [9, 6]]) + test_module.cuda() + print(test_module(xyz, xyz_feats)) + + for _ in range(1): + _, new_features = test_module(xyz, xyz_feats) + new_features.backward( + torch.cuda.FloatTensor(*new_features.size()).fill_(1)) + print(new_features) + print(xyz.grad) diff --git a/models/Scanrefer/lib/pointnet2/pointnet2_test.py b/models/Scanrefer/lib/pointnet2/pointnet2_test.py new file mode 100644 index 0000000..6c7335b --- /dev/null +++ b/models/Scanrefer/lib/pointnet2/pointnet2_test.py @@ -0,0 +1,38 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. +"""Testing customized ops.""" + +import os +import sys + +import numpy as np +import torch +from torch.autograd import gradcheck + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) +import pointnet2_utils + + +def test_interpolation_grad(): + batch_size = 1 + feat_dim = 2 + m = 4 + feats = torch.randn(batch_size, feat_dim, m, + requires_grad=True).float().cuda() + + def interpolate_func(inputs): + idx = torch.from_numpy(np.array([[[0, 1, 2], [1, 2, 3]]])).int().cuda() + weight = torch.from_numpy(np.array([[[1, 1, 1], [2, 2, + 2]]])).float().cuda() + interpolated_feats = pointnet2_utils.three_interpolate( + inputs, idx, weight) + return interpolated_feats + + assert (gradcheck(interpolate_func, feats, atol=1e-1, rtol=1e-1)) + + +if __name__ == '__main__': + test_interpolation_grad() diff --git a/models/Scanrefer/lib/pointnet2/pointnet2_utils.py b/models/Scanrefer/lib/pointnet2/pointnet2_utils.py new file mode 100644 index 0000000..fbdbd3b --- /dev/null +++ b/models/Scanrefer/lib/pointnet2/pointnet2_utils.py @@ -0,0 +1,435 @@ +# Copyright (c) Facebook, Inc. and its affiliates. +# +# This source code is licensed under the MIT license found in the +# LICENSE file in the root directory of this source tree. 
+"""Modified based on: https://github.com/erikwijmans/Pointnet2_PyTorch.""" +from __future__ import (absolute_import, division, print_function, + unicode_literals, with_statement) + +import sys + +import pytorch_utils as pt_utils +import torch +import torch.nn as nn +from torch.autograd import Function + +try: + import builtins +except: + import __builtin__ as builtins + +try: + import pointnet2._ext as _ext +except ImportError: + if not getattr(builtins, '__POINTNET2_SETUP__', False): + raise ImportError( + 'Could not import _ext module.\n' + 'Please see the setup instructions in the README: ' + 'https://github.com/erikwijmans/Pointnet2_PyTorch/blob/master/README.rst' + ) + +if False: + # Workaround for type hints without depending on the `typing` module + from typing import * + + +class RandomDropout(nn.Module): + + def __init__(self, p=0.5, inplace=False): + super(RandomDropout, self).__init__() + self.p = p + self.inplace = inplace + + def forward(self, X): + theta = torch.Tensor(1).uniform_(0, self.p)[0] + return pt_utils.feature_dropout_no_scaling(X, theta, self.train, + self.inplace) + + +class FurthestPointSampling(Function): + + @staticmethod + def forward(ctx, xyz, npoint): + # type: (Any, torch.Tensor, int) -> torch.Tensor + r""" + Uses iterative furthest point sampling to select a set of npoint features that have the largest + minimum distance + + Parameters + ---------- + xyz : torch.Tensor + (B, N, 3) tensor where N > npoint + npoint : int32 + number of features in the sampled set + + Returns + ------- + torch.Tensor + (B, npoint) tensor containing the set + """ + fps_inds = _ext.furthest_point_sampling(xyz, npoint) + ctx.mark_non_differentiable(fps_inds) + return fps_inds + + @staticmethod + def backward(xyz, a=None): + return None, None + + +furthest_point_sample = FurthestPointSampling.apply + + +class GatherOperation(Function): + + @staticmethod + def forward(ctx, features, idx): + # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor + r""" + + Parameters + ---------- + features : torch.Tensor + (B, C, N) tensor + + idx : torch.Tensor + (B, npoint) tensor of the features to gather + + Returns + ------- + torch.Tensor + (B, C, npoint) tensor + """ + + _, C, N = features.size() + + ctx.for_backwards = (idx, C, N) + + return _ext.gather_points(features, idx) + + @staticmethod + def backward(ctx, grad_out): + idx, C, N = ctx.for_backwards + + grad_features = _ext.gather_points_grad(grad_out.contiguous(), idx, N) + return grad_features, None + + +gather_operation = GatherOperation.apply + + +class ThreeNN(Function): + + @staticmethod + def forward(ctx, unknown, known): + # type: (Any, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor] + r""" + Find the three nearest neighbors of unknown in known + Parameters + ---------- + unknown : torch.Tensor + (B, n, 3) tensor of known features + known : torch.Tensor + (B, m, 3) tensor of unknown features + + Returns + ------- + dist : torch.Tensor + (B, n, 3) l2 distance to the three nearest neighbors + idx : torch.Tensor + (B, n, 3) index of 3 nearest neighbors + """ + dist2, idx = _ext.three_nn(unknown, known) + + return torch.sqrt(dist2), idx + + @staticmethod + def backward(ctx, a=None, b=None): + return None, None + + +three_nn = ThreeNN.apply + + +class ThreeInterpolate(Function): + + @staticmethod + def forward(ctx, features, idx, weight): + # type(Any, torch.Tensor, torch.Tensor, torch.Tensor) -> Torch.Tensor + r""" + Performs weight linear interpolation on 3 features + Parameters + ---------- + features : 
torch.Tensor
+            (B, c, m) Features descriptors to be interpolated from
+        idx : torch.Tensor
+            (B, n, 3) three nearest neighbors of the target features in features
+        weight : torch.Tensor
+            (B, n, 3) weights
+
+        Returns
+        -------
+        torch.Tensor
+            (B, c, n) tensor of the interpolated features
+        """
+        B, c, m = features.size()
+        n = idx.size(1)
+
+        ctx.three_interpolate_for_backward = (idx, weight, m)
+
+        return _ext.three_interpolate(features, idx, weight)
+
+    @staticmethod
+    def backward(ctx, grad_out):
+        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]
+        r"""
+        Parameters
+        ----------
+        grad_out : torch.Tensor
+            (B, c, n) tensor with gradients of outputs
+
+        Returns
+        -------
+        grad_features : torch.Tensor
+            (B, c, m) tensor with gradients of features
+
+        None
+
+        None
+        """
+        idx, weight, m = ctx.three_interpolate_for_backward
+
+        grad_features = _ext.three_interpolate_grad(grad_out.contiguous(),
+                                                    idx, weight, m)
+
+        return grad_features, None, None
+
+
+three_interpolate = ThreeInterpolate.apply
+
+
+class GroupingOperation(Function):
+
+    @staticmethod
+    def forward(ctx, features, idx):
+        # type: (Any, torch.Tensor, torch.Tensor) -> torch.Tensor
+        r"""
+
+        Parameters
+        ----------
+        features : torch.Tensor
+            (B, C, N) tensor of features to group
+        idx : torch.Tensor
+            (B, npoint, nsample) tensor containing the indices of features to group with
+
+        Returns
+        -------
+        torch.Tensor
+            (B, C, npoint, nsample) tensor
+        """
+        B, nfeatures, nsample = idx.size()
+        _, C, N = features.size()
+
+        ctx.for_backwards = (idx, N)
+
+        return _ext.group_points(features, idx)
+
+    @staticmethod
+    def backward(ctx, grad_out):
+        # type: (Any, torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]
+        r"""
+
+        Parameters
+        ----------
+        grad_out : torch.Tensor
+            (B, C, npoint, nsample) tensor of the gradients of the output from forward
+
+        Returns
+        -------
+        torch.Tensor
+            (B, C, N) gradient of the features
+        None
+        """
+        idx, N = ctx.for_backwards
+
+        grad_features = _ext.group_points_grad(grad_out.contiguous(), idx, N)
+
+        return grad_features, None
+
+
+grouping_operation = GroupingOperation.apply
+
+
+class BallQuery(Function):
+
+    @staticmethod
+    def forward(ctx, radius, nsample, xyz, new_xyz):
+        # type: (Any, float, int, torch.Tensor, torch.Tensor) -> torch.Tensor
+        r"""
+
+        Parameters
+        ----------
+        radius : float
+            radius of the balls
+        nsample : int
+            maximum number of features in the balls
+        xyz : torch.Tensor
+            (B, N, 3) xyz coordinates of the features
+        new_xyz : torch.Tensor
+            (B, npoint, 3) centers of the ball query
+
+        Returns
+        -------
+        torch.Tensor
+            (B, npoint, nsample) tensor with the indices of the features that form the query balls
+        """
+        inds = _ext.ball_query(new_xyz, xyz, radius, nsample)
+        ctx.mark_non_differentiable(inds)
+        return inds
+
+    @staticmethod
+    def backward(ctx, a=None):
+        return None, None, None, None
+
+
+ball_query = BallQuery.apply
+
+
+class QueryAndGroup(nn.Module):
+    r"""
+    Groups with a ball query of radius
+
+    Parameters
+    ----------
+    radius : float32
+        Radius of ball
+    nsample : int32
+        Maximum number of features to gather in the ball
+    """
+
+    def __init__(self,
+                 radius,
+                 nsample,
+                 use_xyz=True,
+                 ret_grouped_xyz=False,
+                 normalize_xyz=False,
+                 sample_uniformly=False,
+                 ret_unique_cnt=False):
+        # type: (QueryAndGroup, float, int, bool) -> None
+        super(QueryAndGroup, self).__init__()
+        self.radius, self.nsample, self.use_xyz = radius, nsample, use_xyz
+        self.ret_grouped_xyz = ret_grouped_xyz
+        self.normalize_xyz = normalize_xyz
+        self.sample_uniformly = sample_uniformly
+        self.ret_unique_cnt = ret_unique_cnt
+        if self.ret_unique_cnt:
+            assert (self.sample_uniformly)
+
+    def forward(self, xyz, new_xyz, features=None):
+        # type: (QueryAndGroup, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            xyz coordinates of the features (B, N, 3)
+        new_xyz : torch.Tensor
+            centroids (B, npoint, 3)
+        features : torch.Tensor
+            Descriptors of the features (B, C, N)
+
+        Returns
+        -------
+        new_features : torch.Tensor
+            (B, 3 + C, npoint, nsample) tensor
+        """
+        idx = ball_query(self.radius, self.nsample, xyz, new_xyz)
+
+        if self.sample_uniformly:
+            unique_cnt = torch.zeros((idx.shape[0], idx.shape[1]))
+            for i_batch in range(idx.shape[0]):
+                for i_region in range(idx.shape[1]):
+                    unique_ind = torch.unique(idx[i_batch, i_region, :])
+                    num_unique = unique_ind.shape[0]
+                    unique_cnt[i_batch, i_region] = num_unique
+                    sample_ind = torch.randint(0,
+                                               num_unique,
+                                               (self.nsample - num_unique, ),
+                                               dtype=torch.long)
+                    all_ind = torch.cat((unique_ind, unique_ind[sample_ind]))
+                    idx[i_batch, i_region, :] = all_ind
+
+        xyz_trans = xyz.transpose(1, 2).contiguous()
+        grouped_xyz = grouping_operation(xyz_trans,
+                                         idx)  # (B, 3, npoint, nsample)
+        grouped_xyz -= new_xyz.transpose(1, 2).unsqueeze(-1)
+        if self.normalize_xyz:
+            grouped_xyz /= self.radius
+
+        if features is not None:
+            grouped_features = grouping_operation(features, idx)
+            if self.use_xyz:
+                new_features = torch.cat([grouped_xyz, grouped_features],
+                                         dim=1)  # (B, C + 3, npoint, nsample)
+            else:
+                new_features = grouped_features
+        else:
+            assert (self.use_xyz
+                    ), 'use_xyz must be True when no features are provided!'
+            new_features = grouped_xyz
+
+        ret = [new_features]
+        if self.ret_grouped_xyz:
+            ret.append(grouped_xyz)
+        if self.ret_unique_cnt:
+            ret.append(unique_cnt)
+        if len(ret) == 1:
+            return ret[0]
+        else:
+            return tuple(ret)
+
+
+class GroupAll(nn.Module):
+    r"""
+    Groups all features
+    """
+
+    def __init__(self, use_xyz=True, ret_grouped_xyz=False):
+        # type: (GroupAll, bool) -> None
+        super(GroupAll, self).__init__()
+        self.use_xyz = use_xyz
+        # NOTE: this flag was previously accepted but never stored, which made
+        # `forward` crash with an AttributeError; store it here.
+        self.ret_grouped_xyz = ret_grouped_xyz
+
+    def forward(self, xyz, new_xyz, features=None):
+        # type: (GroupAll, torch.Tensor, torch.Tensor, torch.Tensor) -> Tuple[torch.Tensor]
+        r"""
+        Parameters
+        ----------
+        xyz : torch.Tensor
+            xyz coordinates of the features (B, N, 3)
+        new_xyz : torch.Tensor
+            Ignored
+        features : torch.Tensor
+            Descriptors of the features (B, C, N)
+
+        Returns
+        -------
+        new_features : torch.Tensor
+            (B, C + 3, 1, N) tensor
+        """
+
+        grouped_xyz = xyz.transpose(1, 2).unsqueeze(2)
+        if features is not None:
+            grouped_features = features.unsqueeze(2)
+            if self.use_xyz:
+                new_features = torch.cat([grouped_xyz, grouped_features],
+                                         dim=1)  # (B, 3 + C, 1, N)
+            else:
+                new_features = grouped_features
+        else:
+            new_features = grouped_xyz
+
+        if self.ret_grouped_xyz:
+            return new_features, grouped_xyz
+        else:
+            return new_features
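+
+
+# A minimal shape walk-through of the ops above (illustrative only; it assumes
+# the compiled `pointnet2._ext` CUDA extension is available, and the tensor
+# sizes are made up):
+#
+#   xyz = torch.rand(2, 1024, 3).cuda()              # B=2, N=1024 points
+#   fps_idx = furthest_point_sample(xyz, 128)        # (2, 128)
+#   new_xyz = gather_operation(xyz.transpose(1, 2).contiguous(),
+#                              fps_idx).transpose(1, 2).contiguous()
+#   grouper = QueryAndGroup(radius=0.4, nsample=32)
+#   feats = torch.rand(2, 64, 1024).cuda()
+#   out = grouper(xyz, new_xyz, feats)               # (2, 64 + 3, 128, 32)
diff --git a/models/Scanrefer/lib/pointnet2/pytorch_utils.py b/models/Scanrefer/lib/pointnet2/pytorch_utils.py
new file mode 100644
index 0000000..7923b68
--- /dev/null
+++ b/models/Scanrefer/lib/pointnet2/pytorch_utils.py
@@ -0,0 +1,273 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+#
+# This source code is licensed under the MIT license found in the
+# LICENSE file in the root directory of this source tree.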
+"""Modified based on Ref: https://github.com/erikwijmans/Pointnet2_PyTorch.""" +from typing import List, Tuple + +import torch +import torch.nn as nn + + +class SharedMLP(nn.Sequential): + + def __init__(self, + args: List[int], + *, + bn: bool = False, + activation=nn.ReLU(inplace=True), + preact: bool = False, + first: bool = False, + name: str = ''): + super().__init__() + + for i in range(len(args) - 1): + self.add_module( + name + 'layer{}'.format(i), + Conv2d(args[i], + args[i + 1], + bn=(not first or not preact or (i != 0)) and bn, + activation=activation if + (not first or not preact or (i != 0)) else None, + preact=preact)) + + +class _BNBase(nn.Sequential): + + def __init__(self, in_size, batch_norm=None, name=''): + super().__init__() + self.add_module(name + 'bn', batch_norm(in_size)) + + nn.init.constant_(self[0].weight, 1.0) + nn.init.constant_(self[0].bias, 0) + + +class BatchNorm1d(_BNBase): + + def __init__(self, in_size: int, *, name: str = ''): + super().__init__(in_size, batch_norm=nn.BatchNorm1d, name=name) + + +class BatchNorm2d(_BNBase): + + def __init__(self, in_size: int, name: str = ''): + super().__init__(in_size, batch_norm=nn.BatchNorm2d, name=name) + + +class BatchNorm3d(_BNBase): + + def __init__(self, in_size: int, name: str = ''): + super().__init__(in_size, batch_norm=nn.BatchNorm3d, name=name) + + +class _ConvBase(nn.Sequential): + + def __init__(self, + in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=None, + batch_norm=None, + bias=True, + preact=False, + name=''): + super().__init__() + + bias = bias and (not bn) + conv_unit = conv(in_size, + out_size, + kernel_size=kernel_size, + stride=stride, + padding=padding, + bias=bias) + init(conv_unit.weight) + if bias: + nn.init.constant_(conv_unit.bias, 0) + + if bn: + if not preact: + bn_unit = batch_norm(out_size) + else: + bn_unit = batch_norm(in_size) + + if preact: + if bn: + self.add_module(name + 'bn', bn_unit) + + if activation is not None: + self.add_module(name + 'activation', activation) + + self.add_module(name + 'conv', conv_unit) + + if not preact: + if bn: + self.add_module(name + 'bn', bn_unit) + + if activation is not None: + self.add_module(name + 'activation', activation) + + +class Conv1d(_ConvBase): + + def __init__(self, + in_size: int, + out_size: int, + *, + kernel_size: int = 1, + stride: int = 1, + padding: int = 0, + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = ''): + super().__init__(in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv1d, + batch_norm=BatchNorm1d, + bias=bias, + preact=preact, + name=name) + + +class Conv2d(_ConvBase): + + def __init__(self, + in_size: int, + out_size: int, + *, + kernel_size: Tuple[int, int] = (1, 1), + stride: Tuple[int, int] = (1, 1), + padding: Tuple[int, int] = (0, 0), + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = ''): + super().__init__(in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv2d, + batch_norm=BatchNorm2d, + bias=bias, + preact=preact, + name=name) + + +class Conv3d(_ConvBase): + + def __init__(self, + in_size: int, + out_size: int, + *, + kernel_size: Tuple[int, int, int] = (1, 1, 1), + stride: Tuple[int, int, int] = (1, 1, 1), + padding: Tuple[int, int, int] = (0, 0, 0), + 
activation=nn.ReLU(inplace=True), + bn: bool = False, + init=nn.init.kaiming_normal_, + bias: bool = True, + preact: bool = False, + name: str = ''): + super().__init__(in_size, + out_size, + kernel_size, + stride, + padding, + activation, + bn, + init, + conv=nn.Conv3d, + batch_norm=BatchNorm3d, + bias=bias, + preact=preact, + name=name) + + +class FC(nn.Sequential): + + def __init__(self, + in_size: int, + out_size: int, + *, + activation=nn.ReLU(inplace=True), + bn: bool = False, + init=None, + preact: bool = False, + name: str = ''): + super().__init__() + + fc = nn.Linear(in_size, out_size, bias=not bn) + if init is not None: + init(fc.weight) + if not bn: + nn.init.constant_(fc.bias, 0) + + if preact: + if bn: + self.add_module(name + 'bn', BatchNorm1d(in_size)) + + if activation is not None: + self.add_module(name + 'activation', activation) + + self.add_module(name + 'fc', fc) + + if not preact: + if bn: + self.add_module(name + 'bn', BatchNorm1d(out_size)) + + if activation is not None: + self.add_module(name + 'activation', activation) + + +def set_bn_momentum_default(bn_momentum): + + def fn(m): + if isinstance(m, (nn.BatchNorm1d, nn.BatchNorm2d, nn.BatchNorm3d)): + m.momentum = bn_momentum + + return fn + + +class BNMomentumScheduler(object): + + def __init__(self, + model, + bn_lambda, + last_epoch=-1, + setter=set_bn_momentum_default): + if not isinstance(model, nn.Module): + raise RuntimeError("Class '{}' is not a PyTorch nn Module".format( + type(model).__name__)) + + self.model = model + self.setter = setter + self.lmbd = bn_lambda + + self.step(last_epoch + 1) + self.last_epoch = last_epoch + + def step(self, epoch=None): + if epoch is None: + epoch = self.last_epoch + 1 + + self.last_epoch = epoch + self.model.apply(self.setter(self.lmbd(epoch))) diff --git a/models/Scanrefer/lib/pointnet2/setup.py b/models/Scanrefer/lib/pointnet2/setup.py new file mode 100644 index 0000000..c806d9d --- /dev/null +++ b/models/Scanrefer/lib/pointnet2/setup.py @@ -0,0 +1,38 @@ +import glob +import os +import os.path as osp + +from setuptools import find_packages, setup +from torch.utils.cpp_extension import BuildExtension, CUDAExtension + +_this_dir = osp.dirname(osp.abspath(__file__)) +_ext_src_root = '_ext_src' +_ext_sources = glob.glob('{}/src/*.cpp'.format(_ext_src_root)) + glob.glob( + '{}/src/*.cu'.format(_ext_src_root)) +_ext_headers = glob.glob('{}/include/*'.format(_ext_src_root)) + +requirements = ['torch>=1.4'] + +os.environ['TORCH_CUDA_ARCH_LIST'] = '3.7+PTX;5.0;6.0;6.1;6.2;7.0;7.5' + +exec(open('_version.py').read()) + +setup( + name='pointnet2', + version=__version__, + packages=find_packages(), + install_requires=requirements, + ext_modules=[ + CUDAExtension( + name='pointnet2._ext', + sources=_ext_sources, + extra_compile_args={ + 'cxx': ['-O3'], + 'nvcc': ['-O3', '-Xfatbin', '-compress-all'], + }, + include_dirs=[osp.join(_this_dir, _ext_src_root, 'include')], + ) + ], + cmdclass={'build_ext': BuildExtension}, + include_package_data=True, +) diff --git a/models/Scanrefer/lib/projection.py b/models/Scanrefer/lib/projection.py new file mode 100644 index 0000000..0c9fb41 --- /dev/null +++ b/models/Scanrefer/lib/projection.py @@ -0,0 +1,378 @@ +import torch +from torch.autograd import Function + + +class ProjectionHelper(): + + def __init__(self, + intrinsic, + depth_min, + depth_max, + image_dims, + accuracy, + cuda=True): + self.intrinsic = intrinsic + self.depth_min = depth_min + self.depth_max = depth_max + self.image_dims = image_dims + self.accuracy = accuracy + 
self.cuda = cuda + + # precompute + self._compute_corner_points() + + def depth_to_skeleton(self, ux, uy, depth): + # 2D to 3D coordinates with depth (used in compute_frustum_bounds) + x = (ux - self.intrinsic[0][2]) / self.intrinsic[0][0] + y = (uy - self.intrinsic[1][2]) / self.intrinsic[1][1] + return torch.Tensor([depth * x, depth * y, depth]) + + def skeleton_to_depth(self, p): + x = (p[0] * self.intrinsic[0][0]) / p[2] + self.intrinsic[0][2] + y = (p[1] * self.intrinsic[1][1]) / p[2] + self.intrinsic[1][2] + return torch.Tensor([x, y, p[2]]) + + def _compute_corner_points(self): + if self.cuda: + corner_points = torch.ones(8, 4).cuda() + else: + corner_points = torch.ones(8, 4) + + # image to camera + # depth min + corner_points[0][:3] = self.depth_to_skeleton(0, 0, self.depth_min) + corner_points[1][:3] = self.depth_to_skeleton(self.image_dims[0] - 1, + 0, self.depth_min) + corner_points[2][:3] = self.depth_to_skeleton(self.image_dims[0] - 1, + self.image_dims[1] - 1, + self.depth_min) + corner_points[3][:3] = self.depth_to_skeleton(0, + self.image_dims[1] - 1, + self.depth_min) + # depth max + corner_points[4][:3] = self.depth_to_skeleton(0, 0, self.depth_max) + corner_points[5][:3] = self.depth_to_skeleton(self.image_dims[0] - 1, + 0, self.depth_max) + corner_points[6][:3] = self.depth_to_skeleton(self.image_dims[0] - 1, + self.image_dims[1] - 1, + self.depth_max) + corner_points[7][:3] = self.depth_to_skeleton(0, + self.image_dims[1] - 1, + self.depth_max) + + self.corner_points = corner_points + + def compute_frustum_corners(self, camera_to_world): + """Computes the coordinates of the viewing frustum corresponding to one + image and given camera parameters. + + :param camera_to_world: torch tensor of shape (4, 4) + :return: corner_coords: torch tensor of shape (8, 4) + """ + # input: camera pose (torch.Size([4, 4])) + # output: coordinates of the corner points of the viewing frustum of the camera + + # corner_points = camera_to_world.new(8, 4, 1).fill_(1) + + # # image to camera + # # depth min + # corner_points[0][:3] = self.depth_to_skeleton(0, 0, self.depth_min).unsqueeze(1) + # corner_points[1][:3] = self.depth_to_skeleton(self.image_dims[0] - 1, 0, self.depth_min).unsqueeze(1) + # corner_points[2][:3] = self.depth_to_skeleton(self.image_dims[0] - 1, self.image_dims[1] - 1, self.depth_min).unsqueeze(1) + # corner_points[3][:3] = self.depth_to_skeleton(0, self.image_dims[1] - 1, self.depth_min).unsqueeze(1) + # # depth max + # corner_points[4][:3] = self.depth_to_skeleton(0, 0, self.depth_max).unsqueeze(1) + # corner_points[5][:3] = self.depth_to_skeleton(self.image_dims[0] - 1, 0, self.depth_max).unsqueeze(1) + # corner_points[6][:3] = self.depth_to_skeleton(self.image_dims[0] - 1, self.image_dims[1] - 1, self.depth_max).unsqueeze(1) + # corner_points[7][:3] = self.depth_to_skeleton(0, self.image_dims[1] - 1, self.depth_max).unsqueeze(1) + + # camera to world + corner_coords = torch.bmm(camera_to_world.repeat(8, 1, 1), + self.corner_points.unsqueeze(2)) + + return corner_coords + + def compute_frustum_normals(self, corner_coords): + """Computes the normal vectors (pointing inwards) to the 6 planes that + bound the viewing frustum. 
+
+        :param corner_coords: torch tensor of shape (8, 4), coordinates of the corner points of the viewing frustum
+        :return: normals: torch tensor of shape (6, 3)
+        """
+
+        normals = corner_coords.new(6, 3)
+
+        # compute plane normals
+        # front plane
+        plane_vec1 = corner_coords[3][:3] - corner_coords[0][:3]
+        plane_vec2 = corner_coords[1][:3] - corner_coords[0][:3]
+        normals[0] = torch.cross(plane_vec1.view(-1), plane_vec2.view(-1))
+
+        # right side plane
+        plane_vec1 = corner_coords[2][:3] - corner_coords[1][:3]
+        plane_vec2 = corner_coords[5][:3] - corner_coords[1][:3]
+        normals[1] = torch.cross(plane_vec1.view(-1), plane_vec2.view(-1))
+
+        # roof plane
+        plane_vec1 = corner_coords[3][:3] - corner_coords[2][:3]
+        plane_vec2 = corner_coords[6][:3] - corner_coords[2][:3]
+        normals[2] = torch.cross(plane_vec1.view(-1), plane_vec2.view(-1))
+
+        # left side plane
+        plane_vec1 = corner_coords[0][:3] - corner_coords[3][:3]
+        plane_vec2 = corner_coords[7][:3] - corner_coords[3][:3]
+        normals[3] = torch.cross(plane_vec1.view(-1), plane_vec2.view(-1))
+
+        # bottom plane
+        plane_vec1 = corner_coords[1][:3] - corner_coords[0][:3]
+        plane_vec2 = corner_coords[4][:3] - corner_coords[0][:3]
+        normals[4] = torch.cross(plane_vec1.view(-1), plane_vec2.view(-1))
+
+        # back plane
+        plane_vec1 = corner_coords[6][:3] - corner_coords[5][:3]
+        plane_vec2 = corner_coords[4][:3] - corner_coords[5][:3]
+        normals[5] = torch.cross(plane_vec1.view(-1), plane_vec2.view(-1))
+
+        return normals
+
+    def points_in_frustum(self,
+                          corner_coords,
+                          normals,
+                          new_pts,
+                          return_mask=False):
+        """Checks whether new_pts lie in the frustum defined by the
+        coordinates of the corners corner_coords.
+
+        :param corner_coords: torch tensor of shape (8, 4), coordinates of the corners of the viewing frustum
+        :param normals: torch tensor of shape (6, 3), normal vectors of the 6 planes of the viewing frustum
+        :param new_pts: (num_points, 3)
+        :param return_mask: if False, returns the number of new_pts in the frustum
+        :return: if return_mask=True, returns a Boolean mask determining whether each point is in the frustum
+        """
+
+        # create vectors from point set to the planes
+        point_to_plane1 = (new_pts.cuda() - corner_coords[2][:3].view(-1))
+        point_to_plane2 = (new_pts.cuda() - corner_coords[4][:3].view(-1))
+
+        # check if the scalar product with the normals is positive
+        masks = list()
+        # for each normal, create a mask for points that lie on the correct side of the plane
+        for k, normal in enumerate(normals):
+            if k < 3:
+                masks.append(
+                    torch.round(
+                        torch.mm(point_to_plane1, normal.unsqueeze(1)) * 100) /
+                    100 < 0)
+            else:
+                masks.append(
+                    torch.round(
+                        torch.mm(point_to_plane2, normal.unsqueeze(1)) * 100) /
+                    100 < 0)
+        mask = torch.ones(point_to_plane1.shape[0]) > 0
+        mask = mask.cuda()
+
+        # create a combined mask, which keeps only the points that lie on the correct side of each plane
+        for addMask in masks:
+            mask = mask * addMask.squeeze()
+
+        if return_mask:
+            return mask
+        else:
+            return torch.sum(mask)
+
+    def points_in_frustum_cpu(self,
+                              corner_coords,
+                              normals,
+                              new_pts,
+                              return_mask=False):
+        """Checks whether new_pts lie in the frustum defined by the
+        coordinates of the corners corner_coords.
+ + :param corner_coords: torch tensor of shape (8, 4), coordinates of the corners of the viewing frustum + :param normals: torch tensor of shape (6, 3), normal vectors of the 6 planes of the viewing frustum + :param new_pts: (num_points, 3) + :param return_mask: if False, returns number of new_points in frustum + :return: if return_mask=True, returns Boolean mask determining whether point is in frustum or not + """ + + # create vectors from point set to the planes + point_to_plane1 = (new_pts - corner_coords[2][:3].view(-1)) + point_to_plane2 = (new_pts - corner_coords[4][:3].view(-1)) + + # check if the scalar product with the normals is positive + masks = list() + # for each normal, create a mask for points that lie on the correct side of the plane + for k, normal in enumerate(normals): + if k < 3: + masks.append( + torch.round( + torch.mm(point_to_plane1, normal.unsqueeze(1)) * 100) / + 100 < 0) + else: + masks.append( + torch.round( + torch.mm(point_to_plane2, normal.unsqueeze(1)) * 100) / + 100 < 0) + mask = torch.ones(point_to_plane1.shape[0]) > 0 + + # create a combined mask, which keeps only the points that lie on the correct side of each plane + for addMask in masks: + mask = mask * addMask.squeeze() + + if return_mask: + return mask + else: + return torch.sum(mask) + + def compute_projection(self, points, depth, camera_to_world): + """Computes correspondances of points to pixels. + + :param points: tensor containing all points of the point cloud (num_points, 3) + :param depth: depth map (size: proj_image) + :param camera_to_world: camera pose (4, 4) + :param num_points: number of points in one sample point cloud (4096) + :return: indices_3d (array with point indices that correspond to a pixel), + indices_2d (array with pixel indices that correspond to a point) + """ + + num_points = points.shape[0] + world_to_camera = torch.inverse(camera_to_world) + + # create 1-dim array with all indices and array with 4-dim coordinates x, y, z, 1 of points + ind_points = torch.arange(0, num_points, out=torch.LongTensor()).cuda() + coords = camera_to_world.new(4, num_points) + coords[:3, :] = torch.t(points) + coords[3, :].fill_(1) + + # compute viewing frustum + corner_coords = self.compute_frustum_corners(camera_to_world) + normals = self.compute_frustum_normals(corner_coords) + + # check if points are in viewing frustum and only keep according indices + mask_frustum_bounds = self.points_in_frustum(corner_coords, + normals, + points, + return_mask=True).cuda() + + if not mask_frustum_bounds.any(): + return None + ind_points = ind_points[mask_frustum_bounds] + coords = coords[:, ind_points] + + # project world (coords) to camera + camera = torch.mm(world_to_camera, coords) + + # project camera to image + camera[0] = (camera[0] * + self.intrinsic[0][0]) / camera[2] + self.intrinsic[0][2] + camera[1] = (camera[1] * + self.intrinsic[1][1]) / camera[2] + self.intrinsic[1][2] + image = torch.round(camera).long() + + # keep points that are projected onto the image into the correct pixel range + valid_ind_mask = torch.ge(image[0], 0) * torch.ge( + image[1], 0) * torch.lt(image[0], self.image_dims[0]) * torch.lt( + image[1], self.image_dims[1]) + if not valid_ind_mask.any(): + return None + valid_image_ind_x = image[0][valid_ind_mask] + valid_image_ind_y = image[1][valid_ind_mask] + valid_image_ind = valid_image_ind_y * self.image_dims[ + 0] + valid_image_ind_x + + # keep only points that are in the correct depth ranges (self.depth_min - self.depth_max) + depth_vals = 
torch.index_select(depth.view(-1), 0, + valid_image_ind.cuda()) + depth_mask = depth_vals.ge(self.depth_min) * depth_vals.le( + self.depth_max) * torch.abs(depth_vals - + camera[2][valid_ind_mask]).le( + self.accuracy) + if not depth_mask.any(): + return None + + # create two vectors for all considered points that establish 3d to 2d correspondence + ind_update = ind_points[valid_ind_mask] + ind_update = ind_update[depth_mask] + indices_3d = ind_update.new(num_points + 1).fill_( + 0 + ) # needs to be same size for all in batch... (first element has size) + indices_2d = ind_update.new(num_points + 1).fill_( + 0 + ) # needs to be same size for all in batch... (first element has size) + indices_3d[0] = ind_update.shape[ + 0] # first entry: number of relevant entries (of points) + indices_2d[0] = ind_update.shape[0] + indices_3d[1:1 + indices_3d[0]] = ind_update # indices of points + indices_2d[1:1 + indices_2d[0]] = torch.index_select( + valid_image_ind, 0, + torch.nonzero(depth_mask)[:, 0]) # indices of corresponding pixels + + return indices_3d, indices_2d + + @torch.no_grad() + def project(self, label, lin_indices_3d, lin_indices_2d, num_points): + """forward pass of backprojection for 2d features onto 3d points. + + :param label: image features (shape: (num_input_channels, proj_image_dims[0], proj_image_dims[1])) + :param lin_indices_3d: point indices from projection (shape: (num_input_channels, num_points_sample)) + :param lin_indices_2d: pixel indices from projection (shape: (num_input_channels, num_points_sample)) + :param num_points: number of points in one sample + :return: array of points in sample with projected features (shape: (num_input_channels, num_points)) + """ + + num_label_ft = 1 if len( + label.shape) == 2 else label.shape[0] # = num_input_channels + + output = label.new(num_label_ft, num_points).fill_(0) + num_ind = lin_indices_3d[0] + if num_ind > 0: + # selects values from image_features at indices given by lin_indices_2d + vals = torch.index_select(label.view(num_label_ft, -1), 1, + lin_indices_2d[1:1 + num_ind]) + output.view(num_label_ft, -1)[:, + lin_indices_3d[1:1 + num_ind]] = vals + + return output + + +# Inherit from Function +class Projection(Function): + + @staticmethod + def forward(ctx, label, lin_indices_3d, lin_indices_2d, num_points): + """forward pass of backprojection for 2d features onto 3d points. 
+
+        :param label: image features (shape: (num_input_channels, proj_image_dims[0], proj_image_dims[1]))
+        :param lin_indices_3d: point indices from projection (shape: (num_input_channels, num_points_sample))
+        :param lin_indices_2d: pixel indices from projection (shape: (num_input_channels, num_points_sample))
+        :param num_points: number of points in one sample
+        :return: array of points in sample with projected features (shape: (num_input_channels, num_points))
+        """
+        # ctx.save_for_backward(lin_indices_3d, lin_indices_2d)
+        num_label_ft = 1 if len(
+            label.shape) == 2 else label.shape[0]  # = num_input_channels
+
+        output = label.new(num_label_ft, num_points).fill_(0)
+        num_ind = lin_indices_3d[0]
+        if num_ind > 0:
+            # selects values from image_features at indices given by lin_indices_2d
+            vals = torch.index_select(label.view(num_label_ft, -1), 1,
+                                      lin_indices_2d[1:1 + num_ind])
+            output.view(num_label_ft, -1)[:,
+                                          lin_indices_3d[1:1 + num_ind]] = vals
+        return output
+
+    @staticmethod
+    def backward(ctx, grad_output):
+        # NOTE: this path relies on ctx.saved_variables, but the matching
+        # save_for_backward call in forward is commented out above, so the
+        # autograd backward is effectively unused; ProjectionHelper.project
+        # (decorated with @torch.no_grad) is the path actually exercised.
+        grad_label = grad_output.clone()
+        num_ft = grad_output.shape[0]
+        grad_label.resize_(num_ft, 32, 41)
+        lin_indices_3d, lin_indices_2d = ctx.saved_variables
+        num_ind = lin_indices_3d.data[0]
+        vals = torch.index_select(
+            grad_output.data.contiguous().view(num_ft, -1), 1,
+            lin_indices_3d.data[1:1 + num_ind])
+        grad_label.data.view(num_ft,
+                             -1)[:, lin_indices_2d.data[1:1 + num_ind]] = vals
+
+        return grad_label, None, None, None
diff --git a/models/Scanrefer/lib/solver.py b/models/Scanrefer/lib/solver.py
new file mode 100644
index 0000000..9d6d33e
--- /dev/null
+++ b/models/Scanrefer/lib/solver.py
@@ -0,0 +1,642 @@
+'''
+File Created: Monday, 25th November 2019 1:35:30 pm
+Author: Dave Zhenyu Chen (zhenyu.chen@tum.de)
+'''
+
+import os
+import sys
+import time
+
+import numpy as np
+import torch
+from tensorboardX import SummaryWriter
+from torch.optim.lr_scheduler import MultiStepLR, StepLR
+from tqdm import tqdm
+
+sys.path.append(os.path.join(os.getcwd(), 'lib'))  # HACK add the lib folder
+from lib.config import CONF
+from lib.eval_helper import get_eval, inference
+from lib.loss_helper import get_loss
+from lib.pointnet2.pytorch_utils import BNMomentumScheduler
+from utils.eta import decode_eta
+
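+# A hypothetical usage sketch of the Solver defined below (illustrative only;
+# the real training script is expected to build these objects itself, and the
+# hyperparameter values here are made up):
+#
+#   solver = Solver(model, DC, dataloader, optimizer, stamp, val_step=2000,
+#                   lr_decay_step=[80, 120], lr_decay_rate=0.1,
+#                   bn_decay_step=20, bn_decay_rate=0.5)
+#   solver(epoch=200, verbose=10)  # 200 epochs, report every 10 iterations
+
+# We skip computing the following values while training: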
+# train_lang_acc +# train_ref_acc +# train_obj_acc +# train_pos_ratio, train_neg_ratio +# train_iou_rate_0.25, train_iou_rate_0.5 +# mean_eval_time + +ITER_REPORT_TEMPLATE = """ +-------------------------------iter: [{epoch_id}: {iter_id}/{total_iter}]------------------------------- +[loss] train_loss: {train_loss} +[loss] train_ref_loss: {train_ref_loss} +[loss] train_lang_loss: {train_lang_loss} +[loss] train_objectness_loss: {train_objectness_loss} +[loss] train_vote_loss: {train_vote_loss} +[loss] train_box_loss: {train_box_loss} +[info] mean_fetch_time: {mean_fetch_time}s +[info] mean_forward_time: {mean_forward_time}s +[info] mean_backward_time: {mean_backward_time}s +[info] mean_iter_time: {mean_iter_time}s +[info] ETA: {eta_h}h {eta_m}m {eta_s}s +""" + +EPOCH_REPORT_TEMPLATE = """ +---------------------------------summary--------------------------------- +[train] train_loss: {train_loss} +[train] train_ref_loss: {train_ref_loss} +[train] train_lang_loss: {train_lang_loss} +[train] train_objectness_loss: {train_objectness_loss} +[train] train_vote_loss: {train_vote_loss} +[train] train_box_loss: {train_box_loss} +[train] train_lang_acc: {train_lang_acc} +[train] train_ref_acc: {train_ref_acc} +[train] train_obj_acc: {train_obj_acc} +[train] train_pos_ratio: {train_pos_ratio}, train_neg_ratio: {train_neg_ratio} +[train] train_iou_rate_0.25: {train_iou_rate_25}, train_iou_rate_0.5: {train_iou_rate_5} +[val] val_loss: {val_loss} +[val] val_ref_loss: {val_ref_loss} +[val] val_lang_loss: {val_lang_loss} +[val] val_objectness_loss: {val_objectness_loss} +[val] val_vote_loss: {val_vote_loss} +[val] val_box_loss: {val_box_loss} +[val] val_lang_acc: {val_lang_acc} +[val] val_ref_acc: {val_ref_acc} +[val] val_obj_acc: {val_obj_acc} +[val] val_pos_ratio: {val_pos_ratio}, val_neg_ratio: {val_neg_ratio} +[val] val_iou_rate_0.25: {val_iou_rate_25}, val_iou_rate_0.5: {val_iou_rate_5} +""" + +BEST_REPORT_TEMPLATE = """ +--------------------------------------best-------------------------------------- +[best] epoch: {epoch} +[loss] loss: {loss} +[loss] ref_loss: {ref_loss} +[loss] lang_loss: {lang_loss} +[loss] objectness_loss: {objectness_loss} +[loss] vote_loss: {vote_loss} +[loss] box_loss: {box_loss} +[loss] lang_acc: {lang_acc} +[sco.] ref_acc: {ref_acc} +[sco.] obj_acc: {obj_acc} +[sco.] pos_ratio: {pos_ratio}, neg_ratio: {neg_ratio} +[sco.] 
iou_rate_0.25: {iou_rate_25}, iou_rate_0.5: {iou_rate_5} +""" + + +class Solver(): + + def __init__(self, + model, + config, + dataloader, + optimizer, + stamp, + val_step=10, + detection=True, + reference=True, + use_lang_classifier=True, + lr_decay_step=None, + lr_decay_rate=None, + bn_decay_step=None, + bn_decay_rate=None, + eval_only=False): + + self.epoch = 0 # set in __call__ + self.verbose = 0 # set in __call__ + + self.model = model + self.config = config + self.dataloader = dataloader + self.optimizer = optimizer + self.stamp = stamp + self.val_step = val_step + + self.detection = detection + self.reference = reference + self.use_lang_classifier = use_lang_classifier + + self.lr_decay_step = lr_decay_step + self.lr_decay_rate = lr_decay_rate + self.bn_decay_step = bn_decay_step + self.bn_decay_rate = bn_decay_rate + + self.eval_only = eval_only + + self.best = { + 'epoch': 0, + 'loss': float('inf'), + 'ref_loss': float('inf'), + 'lang_loss': float('inf'), + 'objectness_loss': float('inf'), + 'vote_loss': float('inf'), + 'box_loss': float('inf'), + 'lang_acc': -float('inf'), + 'ref_acc': -float('inf'), + 'obj_acc': -float('inf'), + 'pos_ratio': -float('inf'), + 'neg_ratio': -float('inf'), + 'iou_rate_0.25': -float('inf'), + 'iou_rate_0.5': -float('inf') + } + + # init log + # contains all necessary info for all phases + self.log = {'train': {}, 'val': {}} + + # tensorboard + os.makedirs(os.path.join(CONF.PATH.OUTPUT, stamp, 'tensorboard/train'), + exist_ok=True) + os.makedirs(os.path.join(CONF.PATH.OUTPUT, stamp, 'tensorboard/val'), + exist_ok=True) + self._log_writer = { + 'train': + SummaryWriter( + os.path.join(CONF.PATH.OUTPUT, stamp, 'tensorboard/train')), + 'val': + SummaryWriter( + os.path.join(CONF.PATH.OUTPUT, stamp, 'tensorboard/val')) + } + + # training log + log_path = os.path.join(CONF.PATH.OUTPUT, stamp, 'log.txt') + self.log_fout = open(log_path, 'a') + + # private + # only for internal access and temporary results + self._running_log = {} + self._global_iter_id = 0 + self._total_iter = {} # set in __call__ + + # templates + self.__iter_report_template = ITER_REPORT_TEMPLATE + self.__epoch_report_template = EPOCH_REPORT_TEMPLATE + self.__best_report_template = BEST_REPORT_TEMPLATE + + # lr scheduler + if lr_decay_step and lr_decay_rate: + if isinstance(lr_decay_step, list): + self.lr_scheduler = MultiStepLR(optimizer, lr_decay_step, + lr_decay_rate) + else: + self.lr_scheduler = StepLR(optimizer, lr_decay_step, + lr_decay_rate) + else: + self.lr_scheduler = None + + # bn scheduler + if bn_decay_step and bn_decay_rate: + it = -1 + start_epoch = 0 + BN_MOMENTUM_INIT = 0.5 + BN_MOMENTUM_MAX = 0.001 + bn_lbmd = lambda it: max( + BN_MOMENTUM_INIT * bn_decay_rate** + (int(it / bn_decay_step)), BN_MOMENTUM_MAX) + self.bn_scheduler = BNMomentumScheduler(model, + bn_lambda=bn_lbmd, + last_epoch=start_epoch - 1) + else: + self.bn_scheduler = None + + def __call__(self, epoch, verbose): + # setting + self.epoch = epoch + self.verbose = verbose + self._total_iter['train'] = len(self.dataloader['train']) * epoch + self._total_iter['val'] = len(self.dataloader['val']) * self.val_step + if self.eval_only: + self._log('eval only mode, skip training...\n') + self._feed(self.dataloader['val'], 'val', 0) + else: + for epoch_id in range(epoch): + try: + self._log('epoch {} starting...'.format(epoch_id + 1)) + + # feed + self._feed(self.dataloader['train'], 'train', epoch_id) + + self._log('saving last models...\n') + model_root = os.path.join(CONF.PATH.OUTPUT, self.stamp) + 
torch.save(self.model.state_dict(), + os.path.join(model_root, 'model_last.pth')) + + # update lr scheduler + if self.lr_scheduler: + print('update learning rate --> {}\n'.format( + self.lr_scheduler.get_lr())) + self.lr_scheduler.step() + + # update bn scheduler + if self.bn_scheduler: + print('update batch normalization momentum --> {}\n'. + format( + self.bn_scheduler.lmbd( + self.bn_scheduler.last_epoch))) + self.bn_scheduler.step() + + except KeyboardInterrupt: + # finish training + self._finish(epoch_id) + exit() + + # finish training + self._finish(epoch_id) + + def _log(self, info_str): + self.log_fout.write(info_str + '\n') + self.log_fout.flush() + print(info_str) + + def _reset_log(self, phase): + self.log[phase] = { + # info + 'forward': [], + 'backward': [], + 'eval': [], + 'fetch': [], + 'iter_time': [], + # loss (float, not torch.cuda.FloatTensor) + 'loss': [], + 'ref_loss': [], + 'lang_loss': [], + 'objectness_loss': [], + 'vote_loss': [], + 'box_loss': [], + # scores (float, not torch.cuda.FloatTensor) + 'lang_acc': [], + 'ref_acc': [], + 'obj_acc': [], + 'pos_ratio': [], + 'neg_ratio': [], + 'iou_rate_0.25': [], + 'iou_rate_0.5': [] + } + + def _set_phase(self, phase): + if phase == 'train': + self.model.train() + elif phase == 'val': + self.model.eval() + else: + raise ValueError('invalid phase') + + def _forward(self, data_dict): + data_dict = self.model(data_dict) + + return data_dict + + def _backward(self): + # optimize + self.optimizer.zero_grad() + self._running_log['loss'].backward() + self.optimizer.step() + + def _compute_loss(self, data_dict): + _, data_dict = get_loss(data_dict=data_dict, + config=self.config, + detection=self.detection, + reference=self.reference, + use_lang_classifier=self.use_lang_classifier) + + # dump + self._running_log['ref_loss'] = data_dict['ref_loss'] + self._running_log['lang_loss'] = data_dict['lang_loss'] + self._running_log['objectness_loss'] = data_dict['objectness_loss'] + self._running_log['vote_loss'] = data_dict['vote_loss'] + self._running_log['box_loss'] = data_dict['box_loss'] + self._running_log['loss'] = data_dict['loss'] + + def _eval(self, data_dict, pred_list, gt_list): + metric = get_eval(pred_list, gt_list, self.log_fout) + + # skip this step + # TODO: fill this + # for key in metric: + # self._running_log[key] = metric[key] + + def _feed(self, dataloader, phase, epoch_id): + print('phase:', phase) + # switch mode + self._set_phase(phase) + + # re-init log + self._reset_log(phase) + + # change dataloader + dataloader = tqdm(dataloader) if phase == 'train' else tqdm(dataloader) + + pred_list = [] + gt_list = [] + for data_dict in dataloader: + # move to cuda + for key in data_dict: + if key not in ['sub_class', 'sample_ID']: + data_dict[key] = data_dict[key].cuda() + + # initialize the running loss + self._running_log = { + # loss + 'loss': 0, + 'ref_loss': 0, + 'lang_loss': 0, + 'objectness_loss': 0, + 'vote_loss': 0, + 'box_loss': 0, + # acc + 'lang_acc': 0, + 'ref_acc': 0, + 'obj_acc': 0, + 'pos_ratio': 0, + 'neg_ratio': 0, + 'iou_rate_0.25': 0, + 'iou_rate_0.5': 0 + } + + # load + self.log[phase]['fetch'].append( + data_dict['load_time'].sum().item()) + + with torch.autograd.set_detect_anomaly(True): + # forward + start = time.time() + data_dict = self._forward(data_dict) + # print('forward time:', time.time() - start) + + # backward + if phase == 'train': + self._compute_loss(data_dict) + self.log[phase]['forward'].append(time.time() - start) + start = time.time() + self._backward() + 
self.log[phase]['backward'].append(time.time() - start) + + # eval + if phase == 'val': + start = time.time() + pred_sample, gt_sample = inference(data_dict=data_dict, + config=self.config) + pred_list += pred_sample + gt_list += gt_sample + self.log[phase]['eval'].append(time.time() - start) + + # record log + if phase == 'train': + self.log[phase]['loss'].append( + self._running_log['loss'].item()) + self.log[phase]['ref_loss'].append( + self._running_log['ref_loss'].item()) + self.log[phase]['lang_loss'].append( + self._running_log['lang_loss'].item()) + self.log[phase]['objectness_loss'].append( + self._running_log['objectness_loss'].item()) + self.log[phase]['vote_loss'].append( + self._running_log['vote_loss'].item()) + self.log[phase]['box_loss'].append( + self._running_log['box_loss'].item()) + + import pdb + + # report + if phase == 'train': + iter_time = self.log[phase]['fetch'][-1] + iter_time += self.log[phase]['forward'][-1] + iter_time += self.log[phase]['backward'][-1] + # iter_time += self.log[phase]["eval"][-1] + self.log[phase]['iter_time'].append(iter_time) + if (self._global_iter_id + 1) % self.verbose == 0: + self._train_report(epoch_id) + + # # evaluation + # if (self._global_iter_id + 1) % (self.verbose*5) == 0: + # print("evaluating...") + # # val + # self._feed(self.dataloader["val"], "val", epoch_id) + # # self._dump_log("val") + # self._set_phase("train") + # self._epoch_report(epoch_id) + + # dump log + self._dump_log('train') + self._global_iter_id += 1 + + if phase == 'val': + self._eval(data_dict, pred_list, gt_list) + + # check best + if phase == 'val': + cur_criterion = 'iou_rate_0.5' + cur_best = np.mean(self.log[phase][cur_criterion]) + if cur_best > self.best[cur_criterion]: + self._log('best {} achieved: {}'.format( + cur_criterion, cur_best)) + self._log('current train_loss: {}'.format( + np.mean(self.log['train']['loss']))) + self._log('current val_loss: {}'.format( + np.mean(self.log['val']['loss']))) + self.best['epoch'] = epoch_id + 1 + self.best['loss'] = np.mean(self.log[phase]['loss']) + self.best['ref_loss'] = np.mean(self.log[phase]['ref_loss']) + self.best['lang_loss'] = np.mean(self.log[phase]['lang_loss']) + self.best['objectness_loss'] = np.mean( + self.log[phase]['objectness_loss']) + self.best['vote_loss'] = np.mean(self.log[phase]['vote_loss']) + self.best['box_loss'] = np.mean(self.log[phase]['box_loss']) + self.best['lang_acc'] = np.mean(self.log[phase]['lang_acc']) + self.best['ref_acc'] = np.mean(self.log[phase]['ref_acc']) + self.best['obj_acc'] = np.mean(self.log[phase]['obj_acc']) + self.best['pos_ratio'] = np.mean(self.log[phase]['pos_ratio']) + self.best['neg_ratio'] = np.mean(self.log[phase]['neg_ratio']) + self.best['iou_rate_0.25'] = np.mean( + self.log[phase]['iou_rate_0.25']) + self.best['iou_rate_0.5'] = np.mean( + self.log[phase]['iou_rate_0.5']) + + # save model + self._log('saving best models...\n') + model_root = os.path.join(CONF.PATH.OUTPUT, self.stamp) + torch.save(self.model.state_dict(), + os.path.join(model_root, 'model.pth')) + + def _dump_log(self, phase): + log = { + 'loss': [ + 'loss', 'ref_loss', 'lang_loss', 'objectness_loss', + 'vote_loss', 'box_loss' + ] + # "score": ["lang_acc", "ref_acc", "obj_acc", "pos_ratio", "neg_ratio", "iou_rate_0.25", "iou_rate_0.5"] + } + for key in log: + for item in log[key]: + self._log_writer[phase].add_scalar( + '{}/{}'.format(key, item), + np.mean([v for v in self.log[phase][item]]), + self._global_iter_id) + + def _finish(self, epoch_id): + # print best + 
self._best_report() + + # save check point + self._log('saving checkpoint...\n') + save_dict = { + 'epoch': epoch_id, + 'model_state_dict': self.model.state_dict(), + 'optimizer_state_dict': self.optimizer.state_dict() + } + checkpoint_root = os.path.join(CONF.PATH.OUTPUT, self.stamp) + torch.save(save_dict, os.path.join(checkpoint_root, 'checkpoint.tar')) + + # save model + self._log('saving last models...\n') + model_root = os.path.join(CONF.PATH.OUTPUT, self.stamp) + torch.save(self.model.state_dict(), + os.path.join(model_root, 'model_last.pth')) + + # export + for phase in ['train', 'val']: + self._log_writer[phase].export_scalars_to_json( + os.path.join(CONF.PATH.OUTPUT, self.stamp, + 'tensorboard/{}'.format(phase), + 'all_scalars.json')) + + def _train_report(self, epoch_id): + # compute ETA + fetch_time = self.log['train']['fetch'] + forward_time = self.log['train']['forward'] + backward_time = self.log['train']['backward'] + eval_time = self.log['train']['eval'] + iter_time = self.log['train']['iter_time'] + + mean_train_time = np.mean(iter_time) + mean_est_val_time = np.mean([ + fetch + forward for fetch, forward in zip(fetch_time, forward_time) + ]) + eta_sec = (self._total_iter['train'] - self._global_iter_id - + 1) * mean_train_time + eta_sec += len(self.dataloader['val']) * np.ceil( + self._total_iter['train'] / self.val_step) * mean_est_val_time + eta = decode_eta(eta_sec) + + # print report + iter_report = self.__iter_report_template.format( + epoch_id=epoch_id + 1, + iter_id=self._global_iter_id + 1, + total_iter=self._total_iter['train'], + train_loss=round(np.mean([v for v in self.log['train']['loss']]), + 5), + train_ref_loss=round( + np.mean([v for v in self.log['train']['ref_loss']]), 5), + train_lang_loss=round( + np.mean([v for v in self.log['train']['lang_loss']]), 5), + train_objectness_loss=round( + np.mean([v for v in self.log['train']['objectness_loss']]), 5), + train_vote_loss=round( + np.mean([v for v in self.log['train']['vote_loss']]), 5), + train_box_loss=round( + np.mean([v for v in self.log['train']['box_loss']]), 5), + train_lang_acc=round( + np.mean([v for v in self.log['train']['lang_acc']]), 5), + train_ref_acc=round( + np.mean([v for v in self.log['train']['ref_acc']]), 5), + train_obj_acc=round( + np.mean([v for v in self.log['train']['obj_acc']]), 5), + train_pos_ratio=round( + np.mean([v for v in self.log['train']['pos_ratio']]), 5), + train_neg_ratio=round( + np.mean([v for v in self.log['train']['neg_ratio']]), 5), + train_iou_rate_25=round( + np.mean([v for v in self.log['train']['iou_rate_0.25']]), 5), + train_iou_rate_5=round( + np.mean([v for v in self.log['train']['iou_rate_0.5']]), 5), + mean_fetch_time=round(np.mean(fetch_time), 5), + mean_forward_time=round(np.mean(forward_time), 5), + mean_backward_time=round(np.mean(backward_time), 5), + mean_eval_time=round(np.mean(eval_time), 5), + mean_iter_time=round(np.mean(iter_time), 5), + eta_h=eta['h'], + eta_m=eta['m'], + eta_s=eta['s']) + self._log(iter_report) + + def _epoch_report(self, epoch_id): + self._log('epoch [{}/{}] done...'.format(epoch_id + 1, self.epoch)) + epoch_report = self.__epoch_report_template.format( + train_loss=round(np.mean([v for v in self.log['train']['loss']]), + 5), + train_ref_loss=round( + np.mean([v for v in self.log['train']['ref_loss']]), 5), + train_lang_loss=round( + np.mean([v for v in self.log['train']['lang_loss']]), 5), + train_objectness_loss=round( + np.mean([v for v in self.log['train']['objectness_loss']]), 5), + train_vote_loss=round( + np.mean([v 
for v in self.log['train']['vote_loss']]), 5), + train_box_loss=round( + np.mean([v for v in self.log['train']['box_loss']]), 5), + train_lang_acc=round( + np.mean([v for v in self.log['train']['lang_acc']]), 5), + train_ref_acc=round( + np.mean([v for v in self.log['train']['ref_acc']]), 5), + train_obj_acc=round( + np.mean([v for v in self.log['train']['obj_acc']]), 5), + train_pos_ratio=round( + np.mean([v for v in self.log['train']['pos_ratio']]), 5), + train_neg_ratio=round( + np.mean([v for v in self.log['train']['neg_ratio']]), 5), + train_iou_rate_25=round( + np.mean([v for v in self.log['train']['iou_rate_0.25']]), 5), + train_iou_rate_5=round( + np.mean([v for v in self.log['train']['iou_rate_0.5']]), 5), + val_loss=round(np.mean([v for v in self.log['val']['loss']]), 5), + val_ref_loss=round( + np.mean([v for v in self.log['val']['ref_loss']]), 5), + val_lang_loss=round( + np.mean([v for v in self.log['val']['lang_loss']]), 5), + val_objectness_loss=round( + np.mean([v for v in self.log['val']['objectness_loss']]), 5), + val_vote_loss=round( + np.mean([v for v in self.log['val']['vote_loss']]), 5), + val_box_loss=round( + np.mean([v for v in self.log['val']['box_loss']]), 5), + val_lang_acc=round( + np.mean([v for v in self.log['val']['lang_acc']]), 5), + val_ref_acc=round(np.mean([v for v in self.log['val']['ref_acc']]), + 5), + val_obj_acc=round(np.mean([v for v in self.log['val']['obj_acc']]), + 5), + val_pos_ratio=round( + np.mean([v for v in self.log['val']['pos_ratio']]), 5), + val_neg_ratio=round( + np.mean([v for v in self.log['val']['neg_ratio']]), 5), + val_iou_rate_25=round( + np.mean([v for v in self.log['val']['iou_rate_0.25']]), 5), + val_iou_rate_5=round( + np.mean([v for v in self.log['val']['iou_rate_0.5']]), 5), + ) + self._log(epoch_report) + + def _best_report(self): + self._log('training completed...') + best_report = self.__best_report_template.format( + epoch=self.best['epoch'], + loss=round(self.best['loss'], 5), + ref_loss=round(self.best['ref_loss'], 5), + lang_loss=round(self.best['lang_loss'], 5), + objectness_loss=round(self.best['objectness_loss'], 5), + vote_loss=round(self.best['vote_loss'], 5), + box_loss=round(self.best['box_loss'], 5), + lang_acc=round(self.best['lang_acc'], 5), + ref_acc=round(self.best['ref_acc'], 5), + obj_acc=round(self.best['obj_acc'], 5), + pos_ratio=round(self.best['pos_ratio'], 5), + neg_ratio=round(self.best['neg_ratio'], 5), + iou_rate_25=round(self.best['iou_rate_0.25'], 5), + iou_rate_5=round(self.best['iou_rate_0.5'], 5), + ) + self._log(best_report) + with open(os.path.join(CONF.PATH.OUTPUT, self.stamp, 'best.txt'), + 'w') as f: + f.write(best_report) diff --git a/models/Scanrefer/lib/utils_3d.py b/models/Scanrefer/lib/utils_3d.py new file mode 100644 index 0000000..6b8bfa5 --- /dev/null +++ b/models/Scanrefer/lib/utils_3d.py @@ -0,0 +1,400 @@ +import numpy as np +from scipy.linalg import eigh +from scipy.spatial import ConvexHull + + +def interpolate_bbox_points(bbox, granularity=0.2, return_size=False): + """Get the surface points of a 3D bounding box. + + Args: + bbox: an open3d.geometry.OrientedBoundingBox object. + granularity: the roughly desired distance between two adjacent surface points. + return_size: if True, return m1, m2, m3 as well. + Returns: + M x 3 numpy array of Surface points of the bounding box + (m1, m2, m3): if return_size is True, return the number for each dimension.) 
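+
+    Example (hypothetical usage; assumes open3d is installed and an
+    axis-aligned box, so the exact numbers are illustrative):
+        >>> import numpy as np, open3d as o3d
+        >>> bbox = o3d.geometry.OrientedBoundingBox(
+        ...     np.zeros(3), np.eye(3), np.array([1.0, 2.0, 3.0]))
+        >>> pts = interpolate_bbox_points(bbox, granularity=0.5)
+        >>> pts.shape[1]
+        3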
+ """ + corners = np.array(bbox.get_box_points()) + v1, v2, v3 = ( + corners[1] - corners[0], + corners[2] - corners[0], + corners[3] - corners[0], + ) + l1, l2, l3 = np.linalg.norm(v1), np.linalg.norm(v2), np.linalg.norm(v3) + assert (np.allclose(v1.dot(v2), 0) and np.allclose(v2.dot(v3), 0) + and np.allclose(v3.dot(v1), 0)) + transformation_matrix = np.column_stack((v1, v2, v3)) + m1, m2, m3 = l1 / granularity, l2 / granularity, l3 / granularity + m1, m2, m3 = int(np.ceil(m1)), int(np.ceil(m2)), int(np.ceil(m3)) + coords = np.array( + np.meshgrid(np.arange(m1 + 1), np.arange(m2 + 1), + np.arange(m3 + 1))).T.reshape(-1, 3) + condition = ((coords[:, 0] == 0) + | (coords[:, 0] == m1 - 1) + | (coords[:, 1] == 0) + | (coords[:, 1] == m2 - 1) + | (coords[:, 2] == 0) + | (coords[:, 2] == m3 - 1)) + surface_points = coords[condition].astype( + 'float32') # keep only the points on the surface + surface_points /= np.array([m1, m2, m3]) + mapped_coords = surface_points @ transformation_matrix + mapped_coords = mapped_coords.reshape(-1, 3) + corners[0] + if return_size: + return mapped_coords, (m1, m2, m3) + return mapped_coords + + +def check_bboxes_visibility(bboxes, + depth_map, + depth_intrinsic, + extrinsic, + corners_only=True, + granularity=0.2): + """Check the visibility of 3D bounding boxes in a depth map. + + Args: + bboxes: a list of N open3d.geometry.OrientedBoundingBox + depth_map: depth map, numpy array of shape (h, w). + depth_intrinsic: numpy array of shape (4, 4). + extrinsic: w2c. numpy array of shape (4, 4). + corners_only: if True, only check the corners of the bounding boxes. + granularity: the roughly desired distance between two adjacent surface points. + Returns: + Boolean array of shape (N, ) indicating the visibility of each bounding box. + """ + if corners_only: + points = [box.get_box_points() for box in bboxes] + num_points_per_bbox = [8] * len(bboxes) + points = np.concatenate(points, axis=0) # shape (N*8, 3) + else: + points, num_points_per_bbox, num_points_to_view = [], [], [] + for bbox in bboxes: + interpolated_points, (m1, m2, m3) = interpolate_bbox_points( + bbox, granularity=granularity, return_size=True) + num_points_per_bbox.append(interpolated_points.shape[0]) + points.append(interpolated_points) + num_points_to_view.append(max(m1 * m2, m1 * m3, m2 * m3)) + points = np.concatenate(points, axis=0) # shape (\sum Mi, 3) + num_points_to_view = np.array(num_points_to_view) + num_points_per_bbox = np.array(num_points_per_bbox) + visibles = check_point_visibility(points, depth_map, depth_intrinsic, + extrinsic) + num_visibles = [] + left = 0 + for i, num_points in enumerate(num_points_per_bbox): + slice_i = visibles[left:left + num_points] + num_visibles.append(np.sum(slice_i)) + left += num_points + num_visibles = np.array(num_visibles) + visibles = num_visibles / num_points_to_view >= 1 # threshold for visibility + return visibles + + +def check_point_visibility(points, depth_map, depth_intrinsic, extrinsic): + """Check the visibility of 3D points in a depth map. + + Args: + points: 3D points, numpy array of shape (n, 3). + depth_map: depth map, numpy array of shape (h, w). + depth_intrinsic: numpy array of shape (4, 4). + extrinsic: w2c. numpy array of shape (4, 4). + Returns: + Boolean array of shape (n, ) indicating the visibility of each point. 
+ """ + # Project 3D points to 2D image plane + visibles = np.ones(points.shape[0], dtype=bool) + points = np.concatenate([points, np.ones_like(points[..., :1])], + axis=-1) # shape (n, 4) + points = depth_intrinsic @ extrinsic @ points.T # (4, n) + xs, ys, zs = points[:3, :] + visibles &= zs > 0 # remove points behind the camera + xs, ys = xs / zs, ys / zs # normalize to image plane + height, width = depth_map.shape + visibles &= ((0 <= xs) & (xs < width) & (0 <= ys) & (ys < height) + ) # remove points outside the image + xs[(xs < 0) | (xs >= width)] = 0 # avoid index out of range in depth_map + ys[(ys < 0) | (ys >= height)] = 0 + visibles &= (depth_map[ys.astype(int), xs.astype(int)] > zs + ) # remove points occluded by other objects + return visibles + + +def _axis_angle_rotation(axis: str, angle: np.ndarray) -> np.ndarray: + """Return the rotation matrices for one of the rotations about an axis of + which Euler angles describe, for each value of the angle given. + + Args: + axis: Axis label "X" or "Y or "Z". + angle: any shape tensor of Euler angles in radians + + Returns: + Rotation matrices as tensor of shape (..., 3, 3). + """ + + cos = np.cos(angle) + sin = np.sin(angle) + one = np.ones_like(angle) + zero = np.zeros_like(angle) + + if axis == 'X': + R_flat = (one, zero, zero, zero, cos, -sin, zero, sin, cos) + elif axis == 'Y': + R_flat = (cos, zero, sin, zero, one, zero, -sin, zero, cos) + elif axis == 'Z': + R_flat = (cos, -sin, zero, sin, cos, zero, zero, zero, one) + else: + raise ValueError('letter must be either X, Y or Z.') + + return np.stack(R_flat, -1).reshape(angle.shape + (3, 3)) + + +def euler_angles_to_matrix(euler_angles: np.ndarray, + convention: str) -> np.ndarray: + """Convert rotations given as Euler angles in radians to rotation matrices. + + Args: + euler_angles: Euler angles in radians as array of shape (..., 3). + convention: Convention string of three uppercase letters from + {"X", "Y", and "Z"}. + + Returns: + Rotation matrices as array of shape (..., 3, 3). 
+ """ + if euler_angles.ndim == 0 or euler_angles.shape[-1] != 3: + raise ValueError('Invalid input euler angles.') + if len(convention) != 3: + raise ValueError('Convention must have 3 letters.') + if convention[1] in (convention[0], convention[2]): + raise ValueError(f'Invalid convention {convention}.') + for letter in convention: + if letter not in ('X', 'Y', 'Z'): + raise ValueError(f'Invalid letter {letter} in convention string.') + matrices = [ + _axis_angle_rotation(c, e) + for c, e in zip(convention, np.split(euler_angles, 3, axis=-1)) + ] + return np.matmul(np.matmul(matrices[0], matrices[1]), matrices[2]) + + +box_corner_vertices = [ + [0, 0, 0], + [1, 0, 0], + [1, 1, 0], + [0, 1, 0], + [0, 0, 1], + [1, 0, 1], + [1, 1, 1], + [0, 1, 1], +] + + +def cal_corners_single(center, size, rotmat): + center = np.array(center).reshape(3) + size = np.array(size).reshape(3) + rotmat = np.array(rotmat).reshape(3, 3) + + relative_corners = np.array(box_corner_vertices) + relative_corners = 2 * relative_corners - 1 + corners = relative_corners * size / 2.0 + corners = np.dot(corners, rotmat.T).reshape(-1, 3) + corners += center + return corners + + +def cal_corners(center, size, rotmat): + center = np.array(center).reshape(-1, 3) + size = np.array(size).reshape(-1, 3) + rotmat = np.array(rotmat).reshape(-1, 3, 3) + bsz = center.shape[0] + + relative_corners = np.array(box_corner_vertices) + relative_corners = 2 * relative_corners - 1 + relative_corners = np.expand_dims(relative_corners, 1).repeat(bsz, axis=1) + corners = relative_corners * size / 2.0 + corners = corners.transpose(1, 0, 2) + corners = np.matmul(corners, rotmat.transpose(0, 2, 1)).reshape(-1, 8, 3) + corners += np.expand_dims(center, 1).repeat(8, axis=1) + + if corners.shape[0] == 1: + corners = corners[0] + return corners + + +def is_inside_box(points, center, size, rotation_mat): + """Check if points are inside a 3D bounding box. + + Args: + points: 3D points, numpy array of shape (n, 3). + center: center of the box, numpy array of shape (3, ). + size: size of the box, numpy array of shape (3, ). + rotation_mat: rotation matrix of the box, numpy array of shape (3, 3). + Returns: + Boolean array of shape (n, ) indicating if each point is inside the box. + """ + assert points.shape[1] == 3, 'points should be of shape (n, 3)' + center = np.array(center) # n, 3 + size = np.array(size) # n, 3 + rotation_mat = np.array(rotation_mat) + assert rotation_mat.shape == ( + 3, 3), f'R should be shape (3,3), but got {rotation_mat.shape}' + # pcd_local = (rotation_mat.T @ (points - center).T).T The expressions are equivalent + pcd_local = (points - center) @ rotation_mat # n, 3 + pcd_local = pcd_local / size * 2.0 # scale to [-1, 1] # n, 3 + pcd_local = abs(pcd_local) + return (pcd_local[:, 0] <= 1) & (pcd_local[:, 1] <= 1) & (pcd_local[:, 2] + <= 1) + + +def is_inside_box_open3d(points, center, size, rotation_mat): + import open3d as o3d + """ + We have verified that the two functions are equivalent. + but the first one is faster. 
+ """ + obb = o3d.geometry.OrientedBoundingBox() + obb.center = center + obb.extent = size + obb.R = rotation_mat + points_o3d = o3d.utility.Vector3dVector(points) + point_indices_within_box = obb.get_point_indices_within_bounding_box( + points_o3d) + ret = np.zeros(points.shape[0], dtype=bool) + ret[point_indices_within_box] = True + return ret + + +def compute_bbox_from_points_open3d(points): + import open3d as o3d + + # 2e-4 seconds for 100 points + # 1e-3 seconds for 1000 points + points = np.array(points).astype(np.float32) + assert points.shape[ + 1] == 3, f'points should be of shape (n, 3), but got {points.shape}' + o3dpoints = o3d.utility.Vector3dVector(points) + obb = o3d.geometry.OrientedBoundingBox.create_from_points(o3dpoints) + + center = obb.center + size = obb.extent + rotation = obb.R + + points_to_check = center + (points - center) * ( + 1.0 - 1e-6) # add a small epsilon to avoid numerical issues + mask = is_inside_box(points_to_check, center, size, rotation) + # mask2 = is_inside_box_open3d(points, center, size, rotation) + # assert (mask == mask2).all(), "mask is different from mask2" + # assert mask2.all(), (center, size, rotation) + assert mask.all(), (center, size, rotation) + + return center, size, rotation + + +def compute_bbox_from_points(points): + # 7.5e-4 seconds for 100 points + # 1e-3 seconds for 1000 points + hull = ConvexHull(points) + points_on_hull = points[hull.vertices] + center = points_on_hull.mean(axis=0) + + points_centered = points_on_hull - center + cov_matrix = np.cov(points_centered, rowvar=False) + eigvals, eigvecs = eigh(cov_matrix) + rotation = eigvecs + # donot use eigvals to compute size, but use the min max projections + proj = points_centered @ eigvecs + min_proj = np.min(proj, axis=0) + max_proj = np.max(proj, axis=0) + size = max_proj - min_proj + shift = (max_proj + min_proj) / 2.0 + center = center + shift @ rotation.T + + return center, size, rotation + + +def check_pcd_similarity(pcd1, pcd2): + """check whether two point clouds are close enough. + + There might be a permulatation issue, so we use a threshold to check. 
+ Args: + pcd1: np.array of shape (n, 3) + pcd2: np.array of shape (n, 3) + """ + assert pcd1.shape == pcd2.shape, f'pcd1 and pcd2 should have the same shape, but got {pcd1.shape} and {pcd2.shape}' + pcd1 = pcd1.astype(np.float64) + pcd2 = pcd2.astype(np.float64) + distances_mat = np.sqrt( + np.sum((pcd1.reshape(1, -1, 3) - pcd2.reshape(-1, 1, 3))**2, axis=-1)) + distances_rowwise = np.min(distances_mat, axis=1) + distances_colwise = np.min(distances_mat, axis=0) + threshold = 1e-2 + return (distances_rowwise < threshold).all() and (distances_colwise < + threshold).all() + + +if __name__ == '__main__': + from tqdm import tqdm + centers = np.random.rand(100, 3) * 10 - 5 + sizes = np.random.rand(100, 3) * 1 + 1 + angle_range = np.pi + roll, pitch, yaw = np.random.rand(3) * angle_range * 2 - angle_range + rotation = euler_angles_to_matrix(np.array([yaw, pitch, roll]), 'ZYX') + corners = cal_corners(centers, sizes, rotation) + for i in range(100): + center = centers[i] + size = sizes[i] + corners_i = corners[i] + assert check_pcd_similarity(corners_i, + cal_corners_single(center, size, rotation)) + exit() + + for i in tqdm(range(10000)): + center = np.random.rand(3) * 10 - 5 + size = np.random.rand(3) * 1 + 1 + angle_range = np.pi + roll, pitch, yaw = np.random.rand(3) * angle_range * 2 - angle_range + # center = np.array([0, 0, 0]) + # size = np.array([1, 1, 1]) + # roll, pitch, yaw = 0, 0, 1 + rotation = euler_angles_to_matrix(np.array([yaw, pitch, roll]), 'ZYX') + corners = cal_corners_single(center, size, rotation) + # print(points) + center2, size2, rotation2 = compute_bbox_from_points_open3d(corners) + corners2 = cal_corners_single(center2, size2, rotation2) + compare_dict = { + 'center': (center, center2), + 'size': (size, size2), + 'rotation': (rotation, rotation2), + 'corners': (corners, corners2) + } + if not check_pcd_similarity(corners, corners2): + print(compare_dict) + exit() + center3, size3, rotation3 = compute_bbox_from_points(corners) + corners3 = cal_corners_single(center3, size3, rotation3) + compare_dict = { + 'center': (center, center3), + 'size': (size, size3), + 'rotation': (rotation, rotation3), + 'corners': (corners, corners3) + } + if not check_pcd_similarity(corners, corners3): + for k, v in compare_dict.items(): + print(k, v) + exit() + for i in tqdm(range(1000)): + points = np.random.rand(100, 3) * 10 - 5 + center, size, rotation = compute_bbox_from_points_open3d(points) + corners = cal_corners_single(center, size, rotation) + center2, size2, rotation2 = compute_bbox_from_points(points) + corners2 = cal_corners_single(center2, size2, rotation2) + vol1 = size[0] * size[1] * size[2] + vol2 = size2[0] * size2[1] * size2[2] + if not check_pcd_similarity(corners, corners2): + print(vol1, vol2) + print(f'vol1: {vol1}, vol2: {vol2}') + print(f'center: {center}, center2: {center2}') + print(f'size: {size}, size2: {size2}') + print(f'rotation: {rotation}, rotation2: {rotation2}') + print(f'corners: {corners}, corners2: {corners2}') + exit() diff --git a/models/Scanrefer/lib/utils_read.py b/models/Scanrefer/lib/utils_read.py new file mode 100644 index 0000000..bc029a8 --- /dev/null +++ b/models/Scanrefer/lib/utils_read.py @@ -0,0 +1,482 @@ +import json +import os + +import cv2 +import numpy as np +from tqdm import tqdm + +EXCLUDED_OBJECTS = ['wall', 'ceiling', 'floor'] + + +def reverse_multi2multi_mapping(mapping): + """ + Args: + mapping: dict in format key1:[value1, value2], key2:[value2, value3] + Returns: + mapping: dict in format value1:[key1], value2:[key1, key2], 
value3:[key2]
+    """
+    output = {}
+    possible_values = []
+    for key, values in mapping.items():
+        for value in values:
+            possible_values.append(value)
+    possible_values = list(set(possible_values))
+    for value in possible_values:
+        output[value] = []
+    for key, values in mapping.items():
+        for value in values:
+            output[value].append(key)
+    return output
+
+
+def reverse_121_mapping(mapping):
+    """Reverse a 1-to-1 mapping.
+
+    Args:
+        mapping: dict in format key1:value1, key2:value2
+    Returns:
+        mapping: dict in format value1:key1, value2:key2
+    """
+    return {v: k for k, v in mapping.items()}
+
+
+def load_json(path):
+    if os.path.getsize(path) == 0:
+        return None
+    with open(path, 'r', encoding='utf-8') as f:
+        data = json.load(f)
+    return data
+
+
+def read_extrinsic_dir(directory):
+    """
+    Returns:
+        extrinsics: numpy array of extrinsic matrices, shape (N, 4, 4)
+        ids: list of ids (str) of matrix files.
+    """
+    extrinsics = []
+    ids = []
+    for file in os.listdir(directory):
+        if file.endswith('.txt') or file.endswith('.npy'):
+            if file.startswith('depth_intrinsic') or file.startswith(
+                    'intrinsic'):
+                continue
+            path = os.path.join(directory, file)
+            extrinsics.append(read_extrinsic(path))
+            path = path.replace('\\', '/')
+            ids.append(file.split('.')[0])
+    return extrinsics, ids
+
+
+def _pad_extrinsic(mat):
+    """Transforms the extrinsic matrix to the 4x4 form."""
+    mat = np.array(mat)
+    if mat.shape == (3, 4):
+        mat = np.vstack((mat, [0, 0, 0, 1]))
+    elif mat.shape != (4, 4):
+        raise ValueError('Invalid shape of matrix.')
+    return mat
+
+
+def read_extrinsic(path):
+    """Returns a 4x4 numpy array of the extrinsic matrix."""
+    if path.endswith('.txt'):
+        mat = np.loadtxt(path)
+        return _pad_extrinsic(mat)
+    elif path.endswith('.npy'):
+        mat = np.load(path)
+        return _pad_extrinsic(mat)
+    else:
+        raise ValueError('Invalid file extension.')
+
+
+def _read_intrinsic_mp3d(path):
+    a = np.loadtxt(path)
+    intrinsic = np.identity(4, dtype=float)
+    intrinsic[0][0] = a[2]  # fx
+    intrinsic[1][1] = a[3]  # fy
+    intrinsic[0][2] = a[4]  # cx
+    intrinsic[1][2] = a[5]  # cy
+    # a[0], a[1] are the width and height of the image
+    return intrinsic
+
+
+def _read_intrinsic_scannet(path):
+    intrinsic = np.loadtxt(path)
+    return intrinsic
+
+
+def read_intrinsic(path, mode='scannet'):
+    """Reads intrinsic matrix from file.
+
+    Returns:
+        extended intrinsic of shape (4, 4)
+    """
+    if mode == 'scannet':
+        return _read_intrinsic_scannet(path)
+    elif mode == 'mp3d':
+        return _read_intrinsic_mp3d(path)
+    else:
+        raise ValueError('Invalid mode {}.'.format(mode))
+
+
+def _read_axis_align_matrix_scannet(path):
+    with open(path, 'r') as file:
+        first_line = file.readline()
+    vals = first_line.strip().split(' ')[2:]
+    vals = np.array(vals, dtype=np.float64)
+    output = vals.reshape(4, 4)
+    return output
+
+
+def read_axis_align_matrix(path, mode):
+    if mode == 'scannet':
+        return _read_axis_align_matrix_scannet(path)
+    else:
+        raise ValueError('Invalid mode {}.'.format(mode))
+
+
+def read_depth_map(path):
+    """Reads depth map from file.
+
+    Returns:
+        depth: numpy array of depth values, shape (H, W)
+    """
+    if '3rscan' in path:
+        path = path[:-4] + '.pgm'
+    depth_map = cv2.imread(path, cv2.IMREAD_UNCHANGED)
+    if depth_map is None:
+        raise ValueError(f'Cannot read file {path}')
+    # note: in-place '/=' would fail here, since cv2.imread returns an
+    # integer array and the float result cannot be cast back into it
+    depth_map = depth_map / 1000.0
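+    # Depth unit conventions, summarizing the scaling above and below:
+    #   ScanNet / 3RScan store depth in millimeters   -> divide by 1000
+    #   Matterport3D stores depth in 0.25 mm units    -> divide by 4000 total
+    #   (the extra factor of 4 is applied in the branch below)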
+ if 'matterport' in path or 'mp3d' in path: + depth_map /= 4.0 # for matterport, depth should be divided by 4000 + return depth_map + + +def read_bboxes_json(path, return_id=False, return_type=False): + """ + Returns: + boxes: numpy array of bounding boxes, shape (M, 9): xyz, lwh, ypr + ids: (optional) numpy array of obj ids, shape (M,) + types: (optional) list of strings, each string is a type of object + """ + with open(path, 'r') as f: + bboxes_json = json.load(f) + boxes = [] + ids = [] + types = [] + for i in range(len(bboxes_json)): + if bboxes_json[i]['obj_type'] in EXCLUDED_OBJECTS: + continue + box = bboxes_json[i]['psr'] + position = np.array( + [box['position']['x'], box['position']['y'], box['position']['z']]) + size = np.array( + [box['scale']['x'], box['scale']['y'], box['scale']['z']]) + euler_angles = np.array( + [box['rotation']['x'], box['rotation']['y'], box['rotation']['z']]) + boxes.append(np.concatenate([position, size, euler_angles])) + ids.append(int(bboxes_json[i]['obj_id'])) + types.append(bboxes_json[i]['obj_type']) + boxes = np.array(boxes) + if return_id and return_type: + ids = np.array(ids) + return boxes, ids, types + if return_id: + ids = np.array(ids) + return boxes, ids + if return_type: + return boxes, types + return boxes + + +def get_scene_prefix(path): + if '3rscan' in path: + return '3rscan' + elif 'matterport' in path or 'mp3d' in path: + return 'matterport3d' + elif 'scene' in path: + return 'scannet' + else: + return '' + + +def read_type2int(path): + with open(path, 'rb') as f: + data = np.load(f, allow_pickle=True) + metainfo = data['metainfo'] + object_type_to_int = metainfo['categories'] + return object_type_to_int + + +def apply_mapping_to_keys(d, mappings): + """ + Args: + d: a dictionary + mappings: dictionary(s) of mappings, e.g. {"old_key1": "new_key1", "old_key2": "new_key2"} + Returns: + a new dictionary with keys changed according to mappings + """ + if not isinstance(mappings, list): + mappings = [mappings] + for mapping in mappings: + d = {mapping.get(k, k): v for k, v in d.items()} + return d + + +def read_annotation_pickle(path, show_progress=True): + """ + Returns: A dictionary. Format. 
scene_id : (bboxes, object_ids, object_types, visible_view_object_dict, extrinsics_c2w, axis_align_matrix, intrinsics, image_paths) + bboxes: numpy array of bounding boxes, shape (N, 9): xyz, lwh, ypr + object_ids: numpy array of obj ids, shape (N,) + object_types: list of strings, each string is a type of object + visible_view_object_dict: a dictionary {view_id: visible_instance_ids} + extrinsics_c2w: a list of 4x4 matrices, each matrix is the extrinsic matrix of a view + axis_align_matrix: a 4x4 matrix, the axis-aligned matrix of the scene + intrinsics: a list of 4x4 matrices, each matrix is the intrinsic matrix of a view + image_paths: a list of strings, each string is the path of an image in the scene + """ + with open(path, 'rb') as f: + data = np.load(f, allow_pickle=True) + metainfo = data['metainfo'] + object_type_to_int = metainfo['categories'] + object_int_to_type = {v: k for k, v in object_type_to_int.items()} + datalist = data['data_list'] + output_data = {} + pbar = tqdm(range(len(datalist))) if show_progress else range( + len(datalist)) + for scene_idx in pbar: + images = datalist[scene_idx]['images'] + intrinsic = datalist[scene_idx].get('cam2img', None) # a 4x4 matrix + missing_intrinsic = False + if intrinsic is None: + missing_intrinsic = True # each view has different intrinsic for mp3d + depth_intrinsic = datalist[scene_idx].get( + 'cam2depth', None) # a 4x4 matrix, for 3rscan + if depth_intrinsic is None and not missing_intrinsic: + depth_intrinsic = datalist[scene_idx][ + 'depth2img'] # a 4x4 matrix, for scannet + axis_align_matrix = datalist[scene_idx][ + 'axis_align_matrix'] # a 4x4 matrix + scene_id = images[0]['img_path'].split('/')[-2] # str + + instances = datalist[scene_idx]['instances'] + bboxes = [] + object_ids = [] + object_types = [] + object_type_ints = [] + for object_idx in range(len(instances)): + bbox_3d = instances[object_idx]['bbox_3d'] # list of 9 values + bbox_label_3d = instances[object_idx]['bbox_label_3d'] # int + bbox_id = instances[object_idx]['bbox_id'] # int + object_type = object_int_to_type[bbox_label_3d] + # if object_type in EXCLUDED_OBJECTS: + # continue + object_type_ints.append(bbox_label_3d) + object_types.append(object_type) + bboxes.append(bbox_3d) + object_ids.append(bbox_id) + bboxes = np.array(bboxes) + object_ids = np.array(object_ids) + object_type_ints = np.array(object_type_ints) + + visible_view_object_dict = {} + extrinsics_c2w = [] + intrinsics = [] + depth_intrinsics = [] + image_paths = [] + for image_idx in range(len(images)): + img_path = images[image_idx]['img_path'] # str + if len(img_path.split('/')) == 3: # should be 4, add prefix + # example input: posed_images/3rscan0001/000000.jpg + # example output: 3rscan/posed_images/3rscan0001/000000.jpg + scene_prefix = get_scene_prefix(img_path) + img_path = os.path.join(scene_prefix, img_path) + extrinsic_id = img_path.split('/')[-1].split('.')[0] # str + cam2global = images[image_idx]['cam2global'] # a 4x4 matrix + if missing_intrinsic: + intrinsic = images[image_idx]['cam2img'] + depth_intrinsic = images[image_idx]['cam2depth'] + visible_instance_indices = images[image_idx][ + 'visible_instance_ids'] # numpy array of int + visible_instance_ids = object_ids[visible_instance_indices] + visible_view_object_dict[extrinsic_id] = visible_instance_ids + extrinsics_c2w.append(cam2global) + intrinsics.append(intrinsic) + depth_intrinsics.append(depth_intrinsic) + image_paths.append(img_path) + if show_progress: + pbar.set_description(f'Processing scene {scene_id}') + 
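+        # assemble the per-scene record; the per-view lists
+        # (extrinsics_c2w, intrinsics, depth_intrinsics, image_paths)
+        # are appended in the same loop and hence index-aligned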
+        output_data[scene_id] = {
+            'bboxes': bboxes,
+            'object_ids': object_ids,
+            'object_types': object_types,
+            'object_type_ints': object_type_ints,
+            'visible_view_object_dict': visible_view_object_dict,
+            'extrinsics_c2w': extrinsics_c2w,
+            'axis_align_matrix': axis_align_matrix,
+            'intrinsics': intrinsics,
+            'depth_intrinsics': depth_intrinsics,
+            'image_paths': image_paths,
+        }
+    return output_data
+
+
+def read_annotation_pickles(paths):
+    """Read multiple annotation pickles and merge them into one dictionary.
+
+    Args:
+        paths: a list of paths to annotation pickles.
+    Returns: Please refer to the return value of read_annotation_pickle()
+    """
+    output_data = {}
+    if isinstance(paths, str):
+        paths = [paths]
+    for path in paths:
+        data = read_annotation_pickle(path)
+        output_data.update(data)
+    output_data = dict(sorted(output_data.items()))
+    return output_data
+
+
+def read_scene_id_mapping(mode):
+    assert mode in ['mp3d', '3rscan']  # scannet does not need this mapping
+    # NOTE: hardcoded cluster path; adapt this to your local copy of the
+    # embodiedscan_infos mapping files
+    fname = f'/mnt/petrelfs/linjingli/mmscan_modelzoo-main/embodiedscan_infos/{mode}_mapping.json'
+    with open(fname, 'r') as f:
+        mapping = json.load(f)
+    return mapping
+
+
+RAW2NUM_3RSCAN = read_scene_id_mapping('3rscan')
+NUM2RAW_3RSCAN = {v: k for k, v in RAW2NUM_3RSCAN.items()}
+RAW2NUM_MP3D = read_scene_id_mapping('mp3d')
+NUM2RAW_MP3D = {v: k for k, v in RAW2NUM_MP3D.items()}
+
+
+def is_valid_name(name):
+    is_scannet = 'scene' in name or 'scannet' in name
+    is_3rscan = '3rscan' in name
+    is_mp3d = 'mp3d' in name or 'matterport' in name
+    is_valid = is_scannet + is_3rscan + is_mp3d == 1
+    if not is_valid:
+        print(f'Invalid name {name}')
+    return is_valid
+
+
+def is_sample_idx(name):
+    if not is_valid_name(name):
+        return False
+    length = len(name.split('/'))
+    return length >= 2
+
+
+def is_scene_id(name):
+    if not is_valid_name(name):
+        return False
+    length = len(name.split('/'))
+    return length == 1
+
+
+def sample_idx_to_scene_id(sample_idx):
+    """Sample index follows the "raw" rule, directly downloaded from the
+    internet.
+
+    scene_id follows the "num"bered rule, used in the dataset.
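+
+    e.g. (raw hash hypothetical) '3rscan/<raw_hash>' -> '3rscan0001',
+    and 'mp3d/<raw_id>/region0' -> '<numbered_id>_region0'.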
+ """ + is_scannet = 'scannet' in sample_idx + is_3rscan = '3rscan' in sample_idx + is_mp3d = 'mp3d' in sample_idx or 'matterport' in sample_idx + assert is_scannet + is_3rscan + is_mp3d == 1, f'Invalid sample_idx {sample_idx}' + if is_scannet: + scene_id = sample_idx.split('/')[-1] + elif is_3rscan: + raw_id = sample_idx.split('/')[-1] + scene_id = RAW2NUM_3RSCAN[raw_id] + elif is_mp3d: + _, raw_id, region_id = sample_idx.split('/') + scene_id = RAW2NUM_MP3D[raw_id] + scene_id = f'{scene_id}_{region_id}' + return scene_id + + +def scene_id_to_sample_idx(scene_id): + is_scannet = 'scene' in scene_id + is_3rscan = '3rscan' in scene_id + is_mp3d = 'mp3d' in scene_id + assert is_scannet + is_3rscan + is_mp3d == 1, f'Invalid scene_id {scene_id}' + if is_scannet: + sample_idx = f'scannet/{scene_id}' + elif is_3rscan: + raw_id = NUM2RAW_3RSCAN[scene_id] + sample_idx = f'3rscan/{raw_id}' + elif is_mp3d: + scene_id, region_id = scene_id.split('_region') + raw_id = NUM2RAW_MP3D[scene_id] + sample_idx = f'mp3d/{raw_id}/region{region_id}' + return sample_idx + + +def to_scene_id(name): + return name if is_scene_id(name) else sample_idx_to_scene_id(name) + + +def to_sample_idx(name): + return name if is_sample_idx(name) else scene_id_to_sample_idx(name) + + +def read_es_info(path, show_progress=True, count_type_from_zero=False): + data = np.load(path, allow_pickle=True) + data_list = data['data_list'] + object_type_to_int = data['metainfo']['categories'] + object_int_to_type = {v: k for k, v in object_type_to_int.items()} + output_data = {} + pbar = tqdm(data_list) if show_progress else data_list + for data in pbar: + if 'sample_idx' in data: + sample_idx = data['sample_idx'] + scene_id = sample_idx_to_scene_id(sample_idx) + else: + scene_id = data['images'][0]['img_path'].split('/')[-2] # str + sample_idx = scene_id_to_sample_idx(scene_id) + bboxes, object_ids, object_types_int, object_types = [], [], [], [] + for inst in data['instances']: + bbox_label_3d = inst['bbox_label_3d'] + object_type = object_int_to_type[bbox_label_3d] + bbox_label_3d -= 1 if count_type_from_zero else 0 + bboxes.append(inst['bbox_3d']) + object_ids.append(inst['bbox_id']) + object_types_int.append(bbox_label_3d) + object_types.append(object_type) + + bboxes = np.array(bboxes) + object_ids = np.array(object_ids) + object_types_int = np.array(object_types_int) + + output_data[scene_id] = { + 'scene_id': scene_id, + 'sample_idx': sample_idx, + 'bboxes': bboxes, + 'object_ids': object_ids, + 'object_types': object_types, + 'object_type_ints': object_types_int, + } + return output_data + + +def read_es_infos(paths, show_progress=True, count_type_from_zero=False): + output_data = {} + if isinstance(paths, str): + paths = [paths] + for path in paths: + data = read_es_info(path, show_progress, count_type_from_zero) + output_data.update(data) + return output_data + + +if __name__ == '__main__': + # pickle_file = "D:\Projects\shared_data\embodiedscan_infos\competition_ver\embodiedscan_infos_val.pkl" + pickle_file = 'D:\Projects\shared_data\embodiedscan_infos\embodiedscan_infos_val_full.pkl' + read_es_infos(pickle_file) + # read_annotation_pickle(pickle_file) diff --git a/models/Scanrefer/models/backbone_module.py b/models/Scanrefer/models/backbone_module.py new file mode 100644 index 0000000..6b2bb05 --- /dev/null +++ b/models/Scanrefer/models/backbone_module.py @@ -0,0 +1,140 @@ +import os +import sys + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + 
+sys.path.append(os.path.join(os.getcwd(), 'lib'))  # HACK add the lib folder
+from lib.pointnet2.pointnet2_modules import (PointnetFPModule,
+                                             PointnetSAModuleVotes)
+
+
+class Pointnet2Backbone(nn.Module):
+    r"""
+    Backbone network for point cloud feature learning.
+    Based on Pointnet++ single-scale grouping network.
+
+    Parameters
+    ----------
+    input_feature_dim: int
+        Number of input channels in the feature descriptor for each point.
+        e.g. 3 for RGB.
+    """
+
+    def __init__(self, input_feature_dim=0):
+        super().__init__()
+
+        self.input_feature_dim = input_feature_dim
+
+        # --------- 4 SET ABSTRACTION LAYERS ---------
+        self.sa1 = PointnetSAModuleVotes(npoint=2048,
+                                         radius=0.2,
+                                         nsample=64,
+                                         mlp=[input_feature_dim, 64, 64, 128],
+                                         use_xyz=True,
+                                         normalize_xyz=True)
+
+        self.sa2 = PointnetSAModuleVotes(npoint=1024,
+                                         radius=0.4,
+                                         nsample=32,
+                                         mlp=[128, 128, 128, 256],
+                                         use_xyz=True,
+                                         normalize_xyz=True)
+
+        self.sa3 = PointnetSAModuleVotes(npoint=512,
+                                         radius=0.8,
+                                         nsample=16,
+                                         mlp=[256, 128, 128, 256],
+                                         use_xyz=True,
+                                         normalize_xyz=True)
+
+        self.sa4 = PointnetSAModuleVotes(npoint=256,
+                                         radius=1.2,
+                                         nsample=16,
+                                         mlp=[256, 128, 128, 256],
+                                         use_xyz=True,
+                                         normalize_xyz=True)
+
+        # --------- 2 FEATURE UPSAMPLING LAYERS --------
+        self.fp1 = PointnetFPModule(mlp=[256 + 256, 256, 256])
+        self.fp2 = PointnetFPModule(mlp=[256 + 256, 256, 256])
+
+    def _break_up_pc(self, pc):
+        xyz = pc[..., :3].contiguous()
+        features = pc[..., 3:].transpose(
+            1, 2).contiguous() if pc.size(-1) > 3 else None
+
+        return xyz, features
+
+    def forward(self, data_dict):
+        r"""
+        Forward pass of the network
+
+        Parameters
+        ----------
+        pointcloud: Variable(torch.cuda.FloatTensor)
+            (B, N, 3 + input_feature_dim) tensor
+            Point cloud to run predictions on
+            Each point in the point-cloud MUST
+            be formatted as (x, y, z, features...)
+ + Returns + ---------- + data_dict: {XXX_xyz, XXX_features, XXX_inds} + XXX_xyz: float32 Tensor of shape (B,K,3) + XXX_features: float32 Tensor of shape (B,K,D) + XXX-inds: int64 Tensor of shape (B,K) values in [0,N-1] + """ + + pointcloud = data_dict['point_clouds'] + + batch_size = pointcloud.shape[0] + + xyz, features = self._break_up_pc(pointcloud) + + # --------- 4 SET ABSTRACTION LAYERS --------- + xyz, features, fps_inds = self.sa1(xyz, features) + data_dict['sa1_inds'] = fps_inds + data_dict['sa1_xyz'] = xyz + data_dict['sa1_features'] = features + + xyz, features, fps_inds = self.sa2( + xyz, features) # this fps_inds is just 0,1,...,1023 + data_dict['sa2_inds'] = fps_inds + data_dict['sa2_xyz'] = xyz + data_dict['sa2_features'] = features + + xyz, features, fps_inds = self.sa3( + xyz, features) # this fps_inds is just 0,1,...,511 + data_dict['sa3_xyz'] = xyz + data_dict['sa3_features'] = features + + xyz, features, fps_inds = self.sa4( + xyz, features) # this fps_inds is just 0,1,...,255 + data_dict['sa4_xyz'] = xyz + data_dict['sa4_features'] = features + + # --------- 2 FEATURE UPSAMPLING LAYERS -------- + features = self.fp1(data_dict['sa3_xyz'], data_dict['sa4_xyz'], + data_dict['sa3_features'], + data_dict['sa4_features']) + features = self.fp2(data_dict['sa2_xyz'], data_dict['sa3_xyz'], + data_dict['sa2_features'], features) + data_dict['fp2_features'] = features + data_dict['fp2_xyz'] = data_dict['sa2_xyz'] + num_seed = data_dict['fp2_xyz'].shape[1] + data_dict['fp2_inds'] = data_dict[ + 'sa1_inds'][:, 0: + num_seed] # indices among the entire input point clouds + return data_dict + + +if __name__ == '__main__': + backbone_net = Pointnet2Backbone(input_feature_dim=3).cuda() + print(backbone_net) + backbone_net.eval() + out = backbone_net(torch.rand(16, 20000, 6).cuda()) + for key in sorted(out.keys()): + print(key, '\t', out[key].shape) diff --git a/models/Scanrefer/models/lang_module.py b/models/Scanrefer/models/lang_module.py new file mode 100644 index 0000000..c386087 --- /dev/null +++ b/models/Scanrefer/models/lang_module.py @@ -0,0 +1,56 @@ +import os +import sys + +import torch +import torch.nn as nn +from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence + + +class LangModule(nn.Module): + + def __init__(self, + num_text_classes, + use_lang_classifier=True, + use_bidir=False, + emb_size=300, + hidden_size=256): + super().__init__() + + self.num_text_classes = num_text_classes + self.use_lang_classifier = use_lang_classifier + self.use_bidir = use_bidir + + self.gru = nn.GRU(input_size=emb_size, + hidden_size=hidden_size, + batch_first=True, + bidirectional=self.use_bidir) + lang_size = hidden_size * 2 if self.use_bidir else hidden_size + + # language classifier + if use_lang_classifier: + self.lang_cls = nn.Sequential( + nn.Linear(lang_size, num_text_classes), nn.Dropout()) + + def forward(self, data_dict): + """encode the input descriptions.""" + + word_embs = data_dict['lang_feat'] + lengths = data_dict['lang_len'].to('cpu') + lang_feat = pack_padded_sequence(word_embs, + lengths, + batch_first=True, + enforce_sorted=False) + + # encode description + _, lang_last = self.gru(lang_feat) + lang_last = lang_last.permute(1, 0, 2).contiguous().flatten( + start_dim=1) # batch_size, hidden_size * num_dir + + # store the encoded language features + data_dict['lang_emb'] = lang_last # B, hidden_size + + # classify + if self.use_lang_classifier: + data_dict['lang_scores'] = self.lang_cls(data_dict['lang_emb']) + + return data_dict diff --git 
a/models/Scanrefer/models/match_module.py b/models/Scanrefer/models/match_module.py new file mode 100644 index 0000000..d3b467c --- /dev/null +++ b/models/Scanrefer/models/match_module.py @@ -0,0 +1,65 @@ +import torch +import torch.nn as nn + + +class MatchModule(nn.Module): + + def __init__(self, num_proposals=256, lang_size=256, hidden_size=128): + super().__init__() + + self.num_proposals = num_proposals + self.lang_size = lang_size + self.hidden_size = hidden_size + + self.fuse = nn.Sequential( + nn.Conv1d(self.lang_size + 128, hidden_size, 1), nn.ReLU()) + # self.match = nn.Conv1d(hidden_size, 1, 1) + self.match = nn.Sequential(nn.Conv1d(hidden_size, hidden_size, 1), + nn.ReLU(), nn.BatchNorm1d(hidden_size), + nn.Conv1d(hidden_size, hidden_size, 1), + nn.ReLU(), nn.BatchNorm1d(hidden_size), + nn.Conv1d(hidden_size, 1, 1)) + + def forward(self, data_dict): + """ + Args: + xyz: (B,K,3) + features: (B,C,K) + Returns: + scores: (B,num_proposal,2+3+NH*2+NS*4) + """ + + # unpack outputs from detection branch + features = data_dict[ + 'aggregated_vote_features'] # batch_size, num_proposal, 128 + objectness_masks = data_dict['objectness_scores'].max( + 2)[1].float().unsqueeze(2) # batch_size, num_proposals, 1 + + # unpack outputs from language branch + lang_feat = data_dict['lang_emb'] # batch_size, lang_size + lang_feat = lang_feat.unsqueeze(1).repeat( + 1, self.num_proposals, 1) # batch_size, num_proposals, lang_size + + # fuse + features = torch.cat( + [features, lang_feat], + dim=-1) # batch_size, num_proposals, 128 + lang_size + features = features.permute( + 0, 2, 1).contiguous() # batch_size, 128 + lang_size, num_proposals + + # fuse features + features = self.fuse( + features) # batch_size, hidden_size, num_proposals + + # mask out invalid proposals + objectness_masks = objectness_masks.permute( + 0, 2, 1).contiguous() # batch_size, 1, num_proposals + features = features * objectness_masks + + # match + confidences = self.match(features).squeeze( + 1) # batch_size, num_proposals + + data_dict['cluster_ref'] = confidences + + return data_dict diff --git a/models/Scanrefer/models/proposal_module.py b/models/Scanrefer/models/proposal_module.py new file mode 100644 index 0000000..f3454fc --- /dev/null +++ b/models/Scanrefer/models/proposal_module.py @@ -0,0 +1,171 @@ +"""Modified from: https://github.com/facebookresearch/votenet/blob/master/model +s/proposal_module.py.""" + +import os +import sys + +import numpy as np +import torch +import torch.nn as nn +import torch.nn.functional as F + +sys.path.append(os.path.join(os.getcwd(), 'lib')) # HACK add the lib folder +import lib.pointnet2.pointnet2_utils +from lib.pointnet2.pointnet2_modules import PointnetSAModuleVotes + + +def normalize_vector(vector): + norm = torch.norm(vector, dim=1, keepdim=True) + 1e-8 + normalized_vector = vector / norm + return normalized_vector + + +def cross_product(a, b): + cross_product = torch.cross(a, b, dim=1) + return cross_product + + +def ortho_6d_2_Mat(x_raw, y_raw): + """x_raw, y_raw: both tensors batch*3.""" + y = normalize_vector(y_raw) + z = cross_product(x_raw, y) + z = normalize_vector(z) # batch*3 + x = cross_product(y, z) # batch*3 + + x = x.unsqueeze(2) + y = y.unsqueeze(2) + z = z.unsqueeze(2) + matrix = torch.cat((x, y, z), 2) # batch*3*3 + return matrix + + +class ProposalModule(nn.Module): + + def __init__(self, + num_class, + num_heading_bin, + num_size_cluster, + mean_size_arr, + num_proposal, + sampling, + seed_feat_dim=256): + super().__init__() + + self.num_class = num_class + 
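+        # 6 channels for the continuous 6D rotation parameterization consumed
+        # by ortho_6d_2_Mat above (two raw 3D axes), replacing VoteNet's
+        # discrete heading-bin classification; the num_heading_bin argument
+        # is kept for interface compatibility but is not used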
self.angle_bin = 6 + self.num_size_cluster = num_size_cluster + self.mean_size_arr = mean_size_arr + self.num_proposal = num_proposal + self.sampling = sampling + self.seed_feat_dim = seed_feat_dim + + # Vote clustering + self.vote_aggregation = PointnetSAModuleVotes( + npoint=self.num_proposal, + radius=0.3, + nsample=16, + mlp=[self.seed_feat_dim, 128, 128, 128], + use_xyz=True, + normalize_xyz=True) + + # Object proposal/detection + # Objectness scores (2), center residual (3), + # heading class+residual (num_heading_bin*2), size class+residual(num_size_cluster*4) + self.proposal = nn.Sequential( + nn.Conv1d(128, 128, 1, bias=False), nn.BatchNorm1d(128), nn.ReLU(), + nn.Conv1d(128, 128, 1, bias=False), nn.BatchNorm1d(128), nn.ReLU(), + nn.Conv1d( + 128, + 2 + 3 + self.angle_bin + num_size_cluster * 4 + self.num_class, + 1)) + + def forward(self, xyz, features, data_dict): + """ + Args: + xyz: (B,K,3) + features: (B,C,K) + Returns: + scores: (B,num_proposal,2+3+NH*2+NS*4) + """ + + # Farthest point sampling (FPS) on votes + xyz, features, fps_inds = self.vote_aggregation(xyz, features) + + sample_inds = fps_inds + + data_dict['aggregated_vote_xyz'] = xyz # (batch_size, num_proposal, 3) + data_dict['aggregated_vote_features'] = features.permute( + 0, 2, 1).contiguous() # (batch_size, num_proposal, 128) + data_dict[ + 'aggregated_vote_inds'] = sample_inds # (batch_size, num_proposal,) # should be 0,1,2,...,num_proposal + + # --------- PROPOSAL GENERATION --------- + net = self.proposal(features) + data_dict = self.decode_scores(net, data_dict, self.num_class, + self.num_size_cluster, + self.mean_size_arr) + + return data_dict + + def decode_scores(self, net, data_dict, num_class, num_size_cluster, + mean_size_arr): + """decode the predicted parameters for the bounding boxes.""" + # here we get: 1. angle info 2. size ratio info + + net_transposed = net.transpose( + 2, 1).contiguous() # (batch_size, 1024, ..) 
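+        # channel layout of the proposal head output, per proposal
+        # (NS = num_size_cluster; matches the slicing below):
+        #   [0:2]        objectness scores
+        #   [2:5]        center offset from the aggregated vote xyz
+        #   [5:11]       6D rotation (x_raw, y_raw)
+        #   [11:11+NS]   size cluster scores
+        #   [11+NS:11+4*NS]  normalized size residuals (NS x 3)
+        #   [11+4*NS:]   semantic class scores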
+ batch_size = net_transposed.shape[0] + num_proposal = net_transposed.shape[1] + + objectness_scores = net_transposed[:, :, 0:2] + + base_xyz = data_dict[ + 'aggregated_vote_xyz'] # (batch_size, num_proposal, 3) + center = base_xyz + net_transposed[:, :, 2: + 5] # (batch_size, num_proposal, 3) + + # angle + x_raw = net_transposed[:, :, 5:5 + 3] + y_raw = net_transposed[:, :, 5 + 3:5 + 3 + 3] + batch_size = x_raw.shape[0] + x_raw = x_raw.view(-1, 3) + y_raw = y_raw.view(-1, 3) + rot_mat = ortho_6d_2_Mat(x_raw, y_raw).reshape(batch_size, -1, 3, 3) + + size_scores = net_transposed[:, :, 5 + self.angle_bin:5 + + self.angle_bin + num_size_cluster] + size_residuals_normalized = net_transposed[:, :, 5 + self.angle_bin + + num_size_cluster:5 + + self.angle_bin + + num_size_cluster * 4].view( + [ + batch_size, + num_proposal, + num_size_cluster, 3 + ] + ) # Bxnum_proposalxnum_size_clusterx3 + + sem_cls_scores = net_transposed[:, :, + 5 + self.angle_bin + num_size_cluster * + 4:] # Bxnum_proposalx10 + + # store + data_dict['objectness_scores'] = objectness_scores + data_dict['center'] = center + # data_dict['heading_scores'] = heading_scores # Bxnum_proposalxnum_heading_bin + # data_dict['heading_residuals_normalized'] = heading_residuals_normalized # Bxnum_proposalxnum_heading_bin (should be -1 to 1) + # data_dict['heading_residuals'] = heading_residuals_normalized * (np.pi/num_heading_bin) # Bxnum_proposalxnum_heading_bin + data_dict['rot_mat'] = rot_mat + data_dict['size_scores'] = size_scores + data_dict['size_residuals_normalized'] = size_residuals_normalized + data_dict[ + 'size_residuals'] = size_residuals_normalized * torch.from_numpy( + mean_size_arr.astype( + np.float32)).cuda().unsqueeze(0).unsqueeze(0) + data_dict['size_calc'] = ( + size_residuals_normalized + 1) * torch.from_numpy( + mean_size_arr.astype( + np.float32)).cuda().unsqueeze(0).unsqueeze(0) + data_dict['sem_cls_scores'] = sem_cls_scores + + return data_dict diff --git a/models/Scanrefer/models/refnet.py b/models/Scanrefer/models/refnet.py new file mode 100644 index 0000000..cbe5ff4 --- /dev/null +++ b/models/Scanrefer/models/refnet.py @@ -0,0 +1,144 @@ +import os +import sys + +import numpy as np +import torch +import torch.nn as nn + +sys.path.append(os.path.join(os.getcwd(), 'lib')) # HACK add the lib folder +from models.backbone_module import Pointnet2Backbone +from models.lang_module import LangModule +from models.match_module import MatchModule +from models.proposal_module import ProposalModule +from models.voting_module import VotingModule + + +class RefNet(nn.Module): + + def __init__(self, + num_class, + num_heading_bin, + num_size_cluster, + mean_size_arr, + input_feature_dim=0, + num_proposal=128, + vote_factor=1, + sampling='vote_fps', + use_lang_classifier=True, + use_bidir=False, + no_reference=False, + emb_size=300, + hidden_size=256): + super().__init__() + + self.num_class = num_class + self.num_heading_bin = num_heading_bin + self.num_size_cluster = num_size_cluster + self.mean_size_arr = mean_size_arr + assert (mean_size_arr.shape[0] == self.num_size_cluster) + self.input_feature_dim = input_feature_dim + self.num_proposal = num_proposal + self.vote_factor = vote_factor + self.sampling = sampling + self.use_lang_classifier = use_lang_classifier + self.use_bidir = use_bidir + self.no_reference = no_reference + + # --------- PROPOSAL GENERATION --------- + # Backbone point feature learning + self.backbone_net = Pointnet2Backbone( + input_feature_dim=self.input_feature_dim) + + # Hough voting + self.vgen = 
VotingModule(self.vote_factor, 256)
+
+        # Vote aggregation and object proposal
+        self.proposal = ProposalModule(num_class, num_heading_bin,
+                                       num_size_cluster, mean_size_arr,
+                                       num_proposal, sampling)
+
+        if not no_reference:
+            # --------- LANGUAGE ENCODING ---------
+            # Encode the input descriptions into vectors
+            # (including attention and language classification)
+            self.lang = LangModule(num_class, use_lang_classifier, use_bidir,
+                                   emb_size, hidden_size)
+
+            # --------- PROPOSAL MATCHING ---------
+            # Match the generated proposals and select the most confident ones
+            self.match = MatchModule(num_proposals=num_proposal,
+                                     lang_size=(1 + int(self.use_bidir)) *
+                                     hidden_size)
+
+    def forward(self, data_dict):
+        """Forward pass of the network.
+
+        Args:
+            data_dict: dict
+                {
+                    point_clouds,
+                    lang_feat
+                }
+
+                point_clouds: Variable(torch.cuda.FloatTensor)
+                    (B, N, 3 + input_channels) tensor
+                    Point cloud to run predictions on
+                    Each point in the point-cloud MUST
+                    be formatted as (x, y, z, features...)
+        Returns:
+            end_points: dict
+        """
+        # for k, v in data_dict.items():
+        #     if isinstance(v, np.ndarray):
+        #         data_dict[k] = torch.tensor(v).to("cuda")
+        #     elif isinstance(v, torch.Tensor):
+        #         data_dict[k] = v.to("cuda")
+        #######################################
+        #                                     #
+        #           DETECTION BRANCH          #
+        #                                     #
+        #######################################
+
+        # --------- BACKBONE POINT FEATURE LEARNING ---------
+        data_dict = self.backbone_net(data_dict)
+
+        # --------- HOUGH VOTING ---------
+        xyz = data_dict['fp2_xyz']
+        features = data_dict['fp2_features']
+        data_dict['seed_inds'] = data_dict['fp2_inds']
+        data_dict['seed_xyz'] = xyz
+        data_dict['seed_features'] = features
+
+        xyz, features = self.vgen(xyz, features)
+        features_norm = torch.norm(features, p=2, dim=1)
+        features = features.div(features_norm.unsqueeze(1))
+        data_dict['vote_xyz'] = xyz
+        data_dict['vote_features'] = features
+
+        # --------- PROPOSAL GENERATION ---------
+        data_dict = self.proposal(xyz, features, data_dict)
+
+        if not self.no_reference:
+            #######################################
+            #                                     #
+            #           LANGUAGE BRANCH           #
+            #                                     #
+            #######################################
+
+            # --------- LANGUAGE ENCODING ---------
+            data_dict = self.lang(data_dict)
+
+            #######################################
+            #                                     #
+            #          PROPOSAL MATCHING          #
+            #                                     #
+            #######################################
+
+            # --------- PROPOSAL MATCHING ---------
+
+            # give all the scores
+            data_dict = self.match(data_dict)
+
+        return data_dict
diff --git a/models/Scanrefer/models/voting_module.py b/models/Scanrefer/models/voting_module.py
new file mode 100644
index 0000000..6fd4cf5
--- /dev/null
+++ b/models/Scanrefer/models/voting_module.py
@@ -0,0 +1,76 @@
+"""Voting module: generate votes from XYZ and features of seed points.
+
+Modified from: https://github.com/facebookresearch/votenet/blob/master/models/voting_module.py
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+
+class VotingModule(nn.Module):
+
+    def __init__(self, vote_factor, seed_feature_dim):
+        """Votes generation from seed point features.
+
+        Args:
+            vote_factor: int
+                number of votes generated from each seed point
+            seed_feature_dim: int
+                number of channels of seed point features
+            vote_feature_dim: int
+                number of channels of vote features
+        """
+        super().__init__()
+        self.vote_factor = vote_factor
+        self.in_dim = seed_feature_dim
+        self.out_dim = self.in_dim  # due to the residual feature, in_dim has to be == out_dim
+        self.conv1 = torch.nn.Conv1d(self.in_dim, self.in_dim, 1)
+        self.conv2 = torch.nn.Conv1d(self.in_dim, self.in_dim, 1)
+        self.conv3 = torch.nn.Conv1d(self.in_dim,
+                                     (3 + self.out_dim) * self.vote_factor, 1)
+        self.bn1 = torch.nn.BatchNorm1d(self.in_dim)
+        self.bn2 = torch.nn.BatchNorm1d(self.in_dim)
+
+    def forward(self, seed_xyz, seed_features):
+        """Forward pass.
+
+        Arguments:
+            seed_xyz: (batch_size, num_seed, 3) Pytorch tensor
+            seed_features: (batch_size, feature_dim, num_seed) Pytorch tensor
+        Returns:
+            vote_xyz: (batch_size, num_seed*vote_factor, 3)
+            vote_features: (batch_size, vote_feature_dim, num_seed*vote_factor)
+        """
+        batch_size = seed_xyz.shape[0]
+        num_seed = seed_xyz.shape[1]
+        num_vote = num_seed * self.vote_factor
+        net = F.relu(self.bn1(self.conv1(seed_features)))
+        net = F.relu(self.bn2(self.conv2(net)))
+        net = self.conv3(
+            net)  # (batch_size, (3+out_dim)*vote_factor, num_seed)
+
+        net = net.transpose(2, 1).view(batch_size, num_seed, self.vote_factor,
+                                       3 + self.out_dim).contiguous()
+        offset = net[:, :, :, 0:3]
+        vote_xyz = seed_xyz.unsqueeze(2) + offset
+        vote_xyz = vote_xyz.contiguous().view(batch_size, num_vote, 3)
+
+        residual_features = net[:, :, :,
+                                3:]  # (batch_size, num_seed, vote_factor, out_dim)
+        vote_features = seed_features.transpose(
+            2, 1).unsqueeze(2).contiguous() + residual_features
+        vote_features = vote_features.contiguous().view(
+            batch_size, num_vote, self.out_dim)
+        vote_features = vote_features.transpose(2, 1).contiguous()
+
+        return vote_xyz, vote_features
+
+
+if __name__ == '__main__':
+    net = VotingModule(2, 256).cuda()
+    xyz, features = net(
+        torch.rand(8, 1024, 3).cuda(),
+        torch.rand(8, 256, 1024).cuda())
+    print('xyz', xyz.shape)
+    print('features', features.shape)
diff --git a/models/Scanrefer/requirements.txt b/models/Scanrefer/requirements.txt
new file mode 100644
index 0000000..ec560f2
--- /dev/null
+++ b/models/Scanrefer/requirements.txt
@@ -0,0 +1,8 @@
+easydict
+h5py
+matplotlib
+opencv-python
+plyfile
+tensorboardX
+tqdm
+trimesh==2.35.39
diff --git a/models/Scanrefer/scripts/compute_multiview_features.py b/models/Scanrefer/scripts/compute_multiview_features.py
new file mode 100644
index 0000000..0cae007
--- /dev/null
+++ b/models/Scanrefer/scripts/compute_multiview_features.py
@@ -0,0 +1,140 @@
+import argparse
+import math
+import os
+import sys
+
+import numpy as np
+import torch
+import torch.nn as nn
+import torchvision.transforms as transforms
+from imageio import imread
+from PIL import Image
+from torch.utils.data import DataLoader, Dataset
+from tqdm import tqdm
+
+sys.path.append(os.path.join(os.getcwd()))  # HACK add the root folder
+from lib.config import CONF
+from lib.enet import create_enet_for_3d
+
+# scannet data
+# NOTE: read only!
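+# CONF.SCANNET_FRAMES is expected to be a path template with two placeholders
+# (scene id and frame type, e.g. '.../{}/{}'); SCANNET_FRAME_PATH then adds a
+# third placeholder for the file name, matching the
+# .format(scene_id, 'color', '<frame>.jpg') calls below
+# (the exact template lives in lib/config.py, which is not part of this diff)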
+SCANNET_FRAME_ROOT = CONF.SCANNET_FRAMES +SCANNET_FRAME_PATH = os.path.join(SCANNET_FRAME_ROOT, '{}') # name of the file +SCANNET_LIST = CONF.SCANNETV2_LIST + +ENET_PATH = CONF.ENET_WEIGHTS +ENET_FEATURE_ROOT = CONF.ENET_FEATURES_SUBROOT +ENET_FEATURE_PATH = CONF.ENET_FEATURES_PATH + + +class EnetDataset(Dataset): + + def __init__(self): + self._init_resources() + + def __len__(self): + return len(self.data) + + def __getitem__(self, idx): + scene_id, frame_id = self.data[idx] + image = self._load_image( + SCANNET_FRAME_PATH.format(scene_id, 'color', + '{}.jpg'.format(frame_id)), [328, 256]) + + return scene_id, frame_id, image + + def _init_resources(self): + self._get_scene_list() + self.data = [] + for scene_id in self.scene_list: + frame_list = sorted(os.listdir( + SCANNET_FRAME_ROOT.format(scene_id, 'color')), + key=lambda x: int(x.split('.')[0])) + for frame_file in frame_list: + self.data.append((scene_id, int(frame_file.split('.')[0]))) + + def _get_scene_list(self): + with open(SCANNET_LIST, 'r') as f: + self.scene_list = sorted(list(set(f.read().splitlines()))) + + def _resize_crop_image(self, image, new_image_dims): + image_dims = [image.shape[1], image.shape[0]] + if image_dims != new_image_dims: + resize_width = int( + math.floor(new_image_dims[1] * float(image_dims[0]) / + float(image_dims[1]))) + image = transforms.Resize([new_image_dims[1], resize_width], + interpolation=Image.NEAREST)( + Image.fromarray(image)) + image = transforms.CenterCrop( + [new_image_dims[1], new_image_dims[0]])(image) + + return np.array(image) + + def _load_image(self, file, image_dims): + image = imread(file) + # preprocess + image = self._resize_crop_image(image, image_dims) + if len(image.shape) == 3: # color image + image = np.transpose(image, [2, 0, 1]) # move feature to front + image = transforms.Normalize( + mean=[0.496342, 0.466664, 0.440796], + std=[0.277856, 0.28623, + 0.291129])(torch.Tensor(image.astype(np.float32) / 255.0)) + elif len(image.shape) == 2: # label image + image = np.expand_dims(image, 0) + else: + raise ValueError + + return image + + def collate_fn(self, data): + scene_ids, frame_ids, images = zip(*data) + scene_ids = list(scene_ids) + frame_ids = list(frame_ids) + images = torch.stack(images, 0).cuda() + + return scene_ids, frame_ids, images + + +def create_enet(): + enet_fixed, enet_trainable, _ = create_enet_for_3d(41, ENET_PATH, 21) + enet = nn.Sequential(enet_fixed, enet_trainable).cuda() + enet.eval() + for param in enet.parameters(): + param.requires_grad = False + + return enet + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, help='gpu', default='0') + args = parser.parse_args() + + # setting + os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu + os.environ['CUDA_LAUNCH_BLOCKING'] = '1' + + # init + dataset = EnetDataset() + dataloader = DataLoader(dataset, + batch_size=256, + shuffle=False, + collate_fn=dataset.collate_fn) + enet = create_enet() + + # feed + print('extracting multiview features from ENet...') + for scene_ids, frame_ids, images in tqdm(dataloader): + features = enet(images) + batch_size = images.shape[0] + for batch_id in range(batch_size): + os.makedirs(ENET_FEATURE_ROOT.format(scene_ids[batch_id]), + exist_ok=True) + np.save( + ENET_FEATURE_PATH.format(scene_ids[batch_id], + frame_ids[batch_id]), + features[batch_id].cpu().numpy()) + + print('done!') diff --git a/models/Scanrefer/scripts/project_multiview_features.py b/models/Scanrefer/scripts/project_multiview_features.py new file mode 
100644 index 0000000..cfcc88d --- /dev/null +++ b/models/Scanrefer/scripts/project_multiview_features.py @@ -0,0 +1,251 @@ +import argparse +import math +import os +import sys + +import h5py +import numpy as np +import torch +import torch.nn as nn +import torchvision.transforms as transforms +from imageio import imread +from PIL import Image +from plyfile import PlyData, PlyElement +from tqdm import tqdm + +sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder +from lib.config import CONF +from lib.projection import ProjectionHelper + +SCANNET_LIST = CONF.SCANNETV2_LIST +SCANNET_DATA = CONF.PATH.SCANNET_DATA +SCANNET_FRAME_ROOT = CONF.SCANNET_FRAMES +SCANNET_FRAME_PATH = os.path.join(SCANNET_FRAME_ROOT, '{}') # name of the file + +ENET_FEATURE_PATH = CONF.ENET_FEATURES_PATH +ENET_FEATURE_DATABASE = CONF.MULTIVIEW + +# projection +INTRINSICS = [[37.01983, 0, 20, 0], [0, 38.52470, 15.5, 0], [0, 0, 1, 0], + [0, 0, 0, 1]] +PROJECTOR = ProjectionHelper(INTRINSICS, 0.1, 4.0, [41, 32], 0.05) + + +def get_scene_list(): + with open(SCANNET_LIST, 'r') as f: + return sorted(list(set(f.read().splitlines()))) + + +def to_tensor(arr): + return torch.Tensor(arr).cuda() + + +def resize_crop_image(image, new_image_dims): + image_dims = [image.shape[1], image.shape[0]] + if image_dims == new_image_dims: + return image + resize_width = int( + math.floor(new_image_dims[1] * float(image_dims[0]) / + float(image_dims[1]))) + image = transforms.Resize([new_image_dims[1], resize_width], + interpolation=Image.NEAREST)( + Image.fromarray(image)) + image = transforms.CenterCrop([new_image_dims[1], + new_image_dims[0]])(image) + image = np.array(image) + + return image + + +def load_image(file, image_dims): + image = imread(file) + # preprocess + image = resize_crop_image(image, image_dims) + if len(image.shape) == 3: # color image + image = np.transpose(image, [2, 0, 1]) # move feature to front + image = transforms.Normalize( + mean=[0.496342, 0.466664, 0.440796], + std=[0.277856, 0.28623, + 0.291129])(torch.Tensor(image.astype(np.float32) / 255.0)) + elif len(image.shape) == 2: # label image + # image = np.expand_dims(image, 0) + pass + else: + raise + + return image + + +def load_pose(filename): + lines = open(filename).read().splitlines() + assert len(lines) == 4 + lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(' ') for x in lines)] + + return np.asarray(lines).astype(np.float32) + + +def load_depth(file, image_dims): + depth_image = imread(file) + # preprocess + depth_image = resize_crop_image(depth_image, image_dims) + depth_image = depth_image.astype(np.float32) / 1000.0 + + return depth_image + + +def get_scene_data(scene_list): + scene_data = {} + for scene_id in scene_list: + # load the original vertices, not the axis-aligned ones + scene_data[scene_id] = np.load( + os.path.join(SCANNET_DATA, scene_id) + '_vert.npy')[:, :3] + + return scene_data + + +def compute_projection(points, depth, camera_to_world): + """ + :param points: tensor containing all points of the point cloud (num_points, 3) + :param depth: depth map (size: proj_image) + :param camera_to_world: camera pose (4, 4) + + :return indices_3d (array with point indices that correspond to a pixel), + :return indices_2d (array with pixel indices that correspond to a point) + + note: + the first digit of indices represents the number of relevant points + the rest digits are for the projection mapping + """ + num_points = points.shape[0] + num_frames = depth.shape[0] + indices_3ds = torch.zeros(num_frames, num_points + 
1).long().cuda() + indices_2ds = torch.zeros(num_frames, num_points + 1).long().cuda() + + for i in range(num_frames): + indices = PROJECTOR.compute_projection(to_tensor(points), + to_tensor(depth[i]), + to_tensor(camera_to_world[i])) + if indices: + indices_3ds[i] = indices[0].long() + indices_2ds[i] = indices[1].long() + print('found {} mappings in {} points from frame {}'.format( + indices_3ds[i][0], num_points, i)) + + return indices_3ds, indices_2ds + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--gpu', type=str, help='gpu', default='0') + parser.add_argument('--maxpool', + action='store_true', + help='use max pooling to aggregate features \ + (use majority voting in label projection mode)') + args = parser.parse_args() + + # setting + os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu + os.environ['CUDA_LAUNCH_BLOCKING'] = '1' + + scene_list = get_scene_list() + scene_data = get_scene_data(scene_list) + with h5py.File(ENET_FEATURE_DATABASE, 'w', libver='latest') as database: + print('projecting multiview features to point cloud...') + for scene_id in scene_list: + print('processing {}...'.format(scene_id)) + scene = scene_data[scene_id] + # load frames + frame_list = list( + map( + lambda x: x.split('.')[0], + sorted( + os.listdir(SCANNET_FRAME_ROOT.format( + scene_id, 'color'))))) + scene_images = np.zeros((len(frame_list), 3, 256, 328)) + scene_depths = np.zeros((len(frame_list), 32, 41)) + scene_poses = np.zeros((len(frame_list), 4, 4)) + for i, frame_id in enumerate(frame_list): + scene_images[i] = load_image( + SCANNET_FRAME_PATH.format(scene_id, 'color', + '{}.jpg'.format(frame_id)), + [328, 256]) + scene_depths[i] = load_depth( + SCANNET_FRAME_PATH.format(scene_id, 'depth', + '{}.png'.format(frame_id)), + [41, 32]) + scene_poses[i] = load_pose( + SCANNET_FRAME_PATH.format(scene_id, 'pose', + '{}.txt'.format(frame_id))) + + # compute projections for each chunk + projection_3d, projection_2d = compute_projection( + scene, scene_depths, scene_poses) + + # compute valid projections + projections = [] + for i in range(projection_3d.shape[0]): + num_valid = projection_3d[i, 0] + if num_valid == 0: + continue + + projections.append( + (frame_list[i], projection_3d[i], projection_2d[i])) + + # # project + # point_features = to_tensor(scene).new(scene.shape[0], 128).fill_(0) + # for i, projection in enumerate(projections): + # frame_id = projection[0] + # projection_3d = projection[1] + # projection_2d = projection[2] + # feat = to_tensor(np.load(ENET_FEATURE_PATH.format(scene_id, frame_id))) + # proj_feat = PROJECTOR.project(feat, projection_3d, projection_2d, scene.shape[0]).transpose(1, 0) + # if i == 0: + # point_features = proj_feat + # else: + # mask = ((point_features == 0).sum(1) == 128).nonzero().squeeze(1) + # point_features[mask] = proj_feat[mask] + + # project + point_features = to_tensor(scene).new(scene.shape[0], 128).fill_(0) + for i, projection in enumerate(projections): + frame_id = projection[0] + projection_3d = projection[1] + projection_2d = projection[2] + feat = to_tensor( + np.load(ENET_FEATURE_PATH.format(scene_id, frame_id))) + + proj_feat = PROJECTOR.project(feat, projection_3d, + projection_2d, + scene.shape[0]).transpose(1, 0) + + if args.maxpool: + # only apply max pooling on the overlapping points + # find out the points that are covered in projection + feat_mask = ((proj_feat == 0).sum(1) != 128).bool() + # find out the points that are not filled with features + point_mask = ((point_features == 0).sum(1) == 
128).bool() + + # for the points that are not filled with features + # and are covered in projection, + # simply fill those points with projected features + mask = point_mask * feat_mask + point_features[mask] = proj_feat[mask] + + # for the points that have already been filled with features + # and are covered in projection, + # apply max pooling first and then fill with pooled values + mask = ~point_mask * feat_mask + point_features[mask] = torch.max(point_features[mask], + proj_feat[mask]) + else: + if i == 0: + point_features = proj_feat + else: + mask = (point_features == 0).sum(1) == 128 + point_features[mask] = proj_feat[mask] + + # save + database.create_dataset(scene_id, + data=point_features.cpu().numpy()) + + print('done!') diff --git a/models/Scanrefer/scripts/project_multiview_labels.py b/models/Scanrefer/scripts/project_multiview_labels.py new file mode 100644 index 0000000..c8b40bb --- /dev/null +++ b/models/Scanrefer/scripts/project_multiview_labels.py @@ -0,0 +1,384 @@ +import argparse +import math +import os +import sys +from collections import Counter + +import h5py +import numpy as np +import pandas as pd +import torch +import torch.nn as nn +import torchvision.transforms as transforms +from imageio import imread +from PIL import Image +from plyfile import PlyData, PlyElement +from tqdm import tqdm + +sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder +from lib.config import CONF +from lib.enet import create_enet_for_3d +from lib.projection import ProjectionHelper + +SCANNET_LIST = CONF.SCANNETV2_LIST +SCANNET_DATA = CONF.PATH.SCANNET_DATA +SCANNET_FRAME_ROOT = CONF.SCANNET_FRAMES +SCANNET_FRAME_PATH = os.path.join(SCANNET_FRAME_ROOT, '{}') # name of the file + +ENET_FEATURE_PATH = CONF.ENET_FEATURES_PATH +ENET_FEATURE_DATABASE = CONF.MULTIVIEW + +# projection +INTRINSICS = [[37.01983, 0, 20, 0], [0, 38.52470, 15.5, 0], [0, 0, 1, 0], + [0, 0, 0, 1]] +PROJECTOR = ProjectionHelper(INTRINSICS, 0.1, 4.0, [41, 32], 0.05) + +ENET_PATH = CONF.ENET_WEIGHTS +ENET_GT_PATH = SCANNET_FRAME_PATH + +NYU40_LABELS = CONF.NYU40_LABELS +SCANNET_LABELS = [ + 'unannotated', 'wall', 'floor', 'chair', 'table', 'desk', 'bed', + 'bookshelf', 'sofa', 'sink', 'bathtub', 'toilet', 'curtain', 'counter', + 'door', 'window', 'shower curtain', 'refridgerator', 'picture', 'cabinet', + 'otherfurniture' +] + +PC_LABEL_ROOT = os.path.join(CONF.PATH.OUTPUT, 'projections') +PC_LABEL_PATH = os.path.join(PC_LABEL_ROOT, '{}.ply') + + +def get_nyu40_labels(): + labels = ['unannotated'] + labels += pd.read_csv(NYU40_LABELS)['nyu40class'].tolist() + + return labels + + +def get_prediction_to_raw(): + labels = get_nyu40_labels() + mapping = {i: label for i, label in enumerate(labels)} + + return mapping + + +def get_nyu_to_scannet(): + nyu_idx_to_nyu_label = get_prediction_to_raw() + scannet_label_to_scannet_idx = { + label: i + for i, label in enumerate(SCANNET_LABELS) + } + + # mapping + nyu_to_scannet = {} + for nyu_idx in range(41): + nyu_label = nyu_idx_to_nyu_label[nyu_idx] + if nyu_label in scannet_label_to_scannet_idx.keys(): + scannet_idx = scannet_label_to_scannet_idx[nyu_label] + else: + scannet_idx = 0 + nyu_to_scannet[nyu_idx] = scannet_idx + + return nyu_to_scannet + + +def create_color_palette(): + return { + 'unannotated': (0, 0, 0), + 'floor': (152, 223, 138), + 'wall': (174, 199, 232), + 'cabinet': (31, 119, 180), + 'bed': (255, 187, 120), + 'chair': (188, 189, 34), + 'sofa': (140, 86, 75), + 'table': (255, 152, 150), + 'door': (214, 39, 40), + 'window': (197, 176, 213), + 
'bookshelf': (148, 103, 189), + 'picture': (196, 156, 148), + 'counter': (23, 190, 207), + 'desk': (247, 182, 210), + 'curtain': (219, 219, 141), + 'refridgerator': (255, 127, 14), + 'bathtub': (227, 119, 194), + 'shower curtain': (158, 218, 229), + 'toilet': (44, 160, 44), + 'sink': (112, 128, 144), + 'otherfurniture': (82, 84, 163), + } + + +def get_scene_list(args): + if args.scene_id == '-1': + with open(SCANNET_LIST, 'r') as f: + return sorted(list(set(f.read().splitlines()))) + else: + return [args.scene_id] + + +def to_tensor(arr): + return torch.Tensor(arr).cuda() + + +def resize_crop_image(image, new_image_dims): + image_dims = [image.shape[1], image.shape[0]] + if image_dims == new_image_dims: + return image + resize_width = int( + math.floor(new_image_dims[1] * float(image_dims[0]) / + float(image_dims[1]))) + image = transforms.Resize([new_image_dims[1], resize_width], + interpolation=Image.NEAREST)( + Image.fromarray(image)) + image = transforms.CenterCrop([new_image_dims[1], + new_image_dims[0]])(image) + image = np.array(image) + + return image + + +def load_image(file, image_dims): + image = imread(file) + # preprocess + image = resize_crop_image(image, image_dims) + if len(image.shape) == 3: # color image + image = np.transpose(image, [2, 0, 1]) # move feature to front + image = transforms.Normalize( + mean=[0.496342, 0.466664, 0.440796], + std=[0.277856, 0.28623, + 0.291129])(torch.Tensor(image.astype(np.float32) / 255.0)) + elif len(image.shape) == 2: # label image + # image = np.expand_dims(image, 0) + pass + else: + raise + + return image + + +def load_pose(filename): + lines = open(filename).read().splitlines() + assert len(lines) == 4 + lines = [[x[0], x[1], x[2], x[3]] for x in (x.split(' ') for x in lines)] + + return np.asarray(lines).astype(np.float32) + + +def load_depth(file, image_dims): + depth_image = imread(file) + # preprocess + depth_image = resize_crop_image(depth_image, image_dims) + depth_image = depth_image.astype(np.float32) / 1000.0 + + return depth_image + + +def visualize(coords, labels): + palette = create_color_palette() + nyu_to_scannet = get_nyu_to_scannet() + vertex = [] + for i in range(coords.shape[0]): + vertex.append((coords[i][0], coords[i][1], coords[i][2], + palette[SCANNET_LABELS[nyu_to_scannet[labels[i]]]][0], + palette[SCANNET_LABELS[nyu_to_scannet[labels[i]]]][1], + palette[SCANNET_LABELS[nyu_to_scannet[labels[i]]]][2])) + + vertex = np.array(vertex, + dtype=[('x', np.dtype('float32')), + ('y', np.dtype('float32')), + ('z', np.dtype('float32')), + ('red', np.dtype('uint8')), + ('green', np.dtype('uint8')), + ('blue', np.dtype('uint8'))]) + + output_pc = PlyElement.describe(vertex, 'vertex') + output_pc = PlyData([output_pc]) + os.makedirs(PC_LABEL_ROOT, exist_ok=True) + output_pc.write(PC_LABEL_PATH.format(args.scene_id)) + + +def get_scene_data(scene_list): + scene_data = {} + for scene_id in scene_list: + scene_data[scene_id] = {} + scene_data[scene_id] = np.load( + os.path.join(SCANNET_DATA, scene_id) + '_vert.npy')[:, :3] + + return scene_data + + +def compute_projection(points, depth, camera_to_world): + """ + :param points: tensor containing all points of the point cloud (num_points, 3) + :param depth: depth map (size: proj_image) + :param camera_to_world: camera pose (4, 4) + + :return indices_3d (array with point indices that correspond to a pixel), + :return indices_2d (array with pixel indices that correspond to a point) + + note: + the first digit of indices represents the number of relevant points + the rest digits are 
for the projection mapping + """ + num_points = points.shape[0] + num_frames = depth.shape[0] + indices_3ds = torch.zeros(num_frames, num_points + 1).long().cuda() + indices_2ds = torch.zeros(num_frames, num_points + 1).long().cuda() + + for i in range(num_frames): + indices = PROJECTOR.compute_projection(to_tensor(points), + to_tensor(depth[i]), + to_tensor(camera_to_world[i])) + if indices: + indices_3ds[i] = indices[0].long() + indices_2ds[i] = indices[1].long() + + return indices_3ds, indices_2ds + + +def create_enet(): + enet_fixed, enet_trainable, enet_classifier = create_enet_for_3d( + 41, ENET_PATH, 21) + enet = nn.Sequential(enet_fixed, enet_trainable, enet_classifier).cuda() + enet.eval() + for param in enet.parameters(): + param.requires_grad = False + + return enet + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--scene_id', type=str, default='-1') + parser.add_argument('--gt', action='store_true') + parser.add_argument('--maxpool', + action='store_true', + help='use max pooling to aggregate features \ + (use majority voting in label projection mode)') + args = parser.parse_args() + + scene_list = get_scene_list(args) + scene_data = get_scene_data(scene_list) + enet = create_enet() + for scene_id in tqdm(scene_list): + scene = scene_data[scene_id] + # load frames + frame_list = list( + map( + lambda x: x.split('.')[0], + sorted(os.listdir(SCANNET_FRAME_ROOT.format(scene_id, + 'color'))))) + scene_images = np.zeros((len(frame_list), 3, 256, 328)) + scene_depths = np.zeros((len(frame_list), 32, 41)) + scene_poses = np.zeros((len(frame_list), 4, 4)) + for i, frame_id in enumerate(frame_list): + scene_images[i] = load_image( + SCANNET_FRAME_PATH.format(scene_id, 'color', + '{}.jpg'.format(frame_id)), + [328, 256]) + scene_depths[i] = load_depth( + SCANNET_FRAME_PATH.format(scene_id, 'depth', + '{}.png'.format(frame_id)), [41, 32]) + scene_poses[i] = load_pose( + SCANNET_FRAME_PATH.format(scene_id, 'pose', + '{}.txt'.format(frame_id))) + + # compute projections for each chunk + projection_3d, projection_2d = compute_projection( + scene, scene_depths, scene_poses) + + # compute valid projections + projections = [] + for i in range(projection_3d.shape[0]): + num_valid = projection_3d[i, 0] + if num_valid == 0: + continue + + projections.append( + (frame_list[i], projection_3d[i], projection_2d[i])) + + # # project + # labels = None + # for i, projection in enumerate(projections): + # frame_id = projection[0] + # projection_3d = projection[1] + # projection_2d = projection[2] + # if args.gt: + # feat = to_tensor(load_image(ENET_GT_PATH.format(scene_id, "labelv2", "{}.png".format(frame_id)), [41, 32])).unsqueeze(0) + # else: + # image = load_image(SCANNET_FRAME_PATH.format(scene_id, "color", "{}.jpg".format(frame_id)), [328, 256]) + # feat = enet(to_tensor(image).unsqueeze(0)).max(1)[1].unsqueeze(1) + + # proj_label = PROJECTOR.project(feat, projection_3d, projection_2d, scene.shape[0]).transpose(1, 0) + # if i == 0: + # labels = proj_label + # else: + # labels[labels == 0] = proj_label[labels == 0] + + # project + labels = to_tensor(scene).new(scene.shape[0], + len(projections)).fill_(0).long() + for i, projection in enumerate(projections): + frame_id = projection[0] + projection_3d = projection[1] + projection_2d = projection[2] + + if args.gt: + feat = to_tensor( + load_image( + ENET_GT_PATH.format(scene_id, 'labelv2', + '{}.png'.format(frame_id)), + [41, 32])).unsqueeze(0) + else: + image = load_image( + SCANNET_FRAME_PATH.format(scene_id, 
'color', + '{}.jpg'.format(frame_id)), + [328, 256]) + feat = enet( + to_tensor(image).unsqueeze(0)).max(1)[1].unsqueeze(1) + + proj_label = PROJECTOR.project(feat, projection_3d, projection_2d, + scene.shape[0]).transpose( + 1, 0) # num_points, 1 + + if args.maxpool: + # only apply max pooling on the overlapping points + # find out the points that are covered in projection + feat_mask = ((proj_label == 0).sum(1) != 1).bool() + # find out the points that are not filled with labels + point_mask = ((labels == 0).sum(1) == len(projections)).bool() + + # for the points that are not filled with features + # and are covered in projection, + # simply fill those points with labels + mask = point_mask * feat_mask + labels[mask, i] = proj_label[mask, 0] + + # for the points that have already been filled with features + # and are covered in projection, + # simply fill those points with labels + mask = ~point_mask * feat_mask + labels[mask, i] = proj_label[mask, 0] + else: + if i == 0: + labels = proj_label + else: + labels[labels == 0] = proj_label[labels == 0] + + # aggregate + if args.maxpool: + new_labels = [] + for label_id in range(labels.shape[0]): + point_label = labels[label_id].cpu().numpy().tolist() + count = dict(Counter(point_label)) + count = sorted(count.items(), key=lambda x: x[1], reverse=True) + count = [c for c in count if c[0] != 0] + if count: + new_labels.append(count[0][0]) + else: + new_labels.append(0) + + labels = torch.FloatTensor(np.array(new_labels)[:, np.newaxis]) + + # output + visualize(scene, labels.long().squeeze(1).cpu().numpy()) diff --git a/models/Scanrefer/scripts/train.py b/models/Scanrefer/scripts/train.py new file mode 100644 index 0000000..0443bf4 --- /dev/null +++ b/models/Scanrefer/scripts/train.py @@ -0,0 +1,343 @@ +import argparse +import importlib +import json +import os +import sys +from copy import deepcopy +from datetime import datetime + +import h5py +import numpy as np +import torch +import torch.nn as nn +import torch.optim as optim +from torch.utils.data import DataLoader + +sys.path.append(os.path.join(os.getcwd())) # HACK add the root folder +from data.scannet.model_util_scannet import ScannetDatasetConfig +from lib.config import CONF +from lib.dataset import ScannetReferenceDataset +from lib.solver import Solver +from models.refnet import RefNet + +# SCANREFER_TRAIN = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_train.json"))) +# SCANREFER_VAL = json.load(open(os.path.join(CONF.PATH.DATA, "ScanRefer_filtered_val.json"))) + +# constants +DC = ScannetDatasetConfig() + + +def get_dataloader(args, split, config, augment): + dataset = ScannetReferenceDataset(es_info_file='', + vg_raw_data_file='', + split=split, + num_points=args.num_points, + use_height=(not args.no_height), + use_color=args.use_color, + use_normal=args.use_normal, + use_multiview=args.use_multiview) + # dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=True) + dataloader = DataLoader(dataset, + batch_size=args.batch_size, + shuffle=True, + num_workers=4) + + return dataset, dataloader + + +def get_model(args): + # initiate model + input_channels = int(args.use_multiview) * 128 + int( + args.use_normal) * 3 + int( + args.use_color) * 3 + int(not args.no_height) + model = RefNet(num_class=DC.num_class, + num_heading_bin=DC.num_heading_bin, + num_size_cluster=DC.num_size_cluster, + mean_size_arr=DC.mean_size_arr, + input_feature_dim=input_channels, + num_proposal=args.num_proposals, + use_lang_classifier=(not args.no_lang_cls), + 
use_bidir=args.use_bidir, + no_reference=args.no_reference) + + # trainable model + if args.use_pretrained: + # load model + print('loading pretrained VoteNet...') + pretrained_model = RefNet(num_class=DC.num_class, + num_heading_bin=DC.num_heading_bin, + num_size_cluster=DC.num_size_cluster, + mean_size_arr=DC.mean_size_arr, + num_proposal=args.num_proposals, + input_feature_dim=input_channels, + use_bidir=args.use_bidir, + no_reference=True) + + pretrained_path = os.path.join(CONF.PATH.OUTPUT, args.use_pretrained, + 'model_last.pth') + pretrained_model.load_state_dict(torch.load(pretrained_path), + strict=False) + + # mount + model.backbone_net = pretrained_model.backbone_net + model.vgen = pretrained_model.vgen + model.proposal = pretrained_model.proposal + + if args.no_detection: + # freeze pointnet++ backbone + for param in model.backbone_net.parameters(): + param.requires_grad = False + + # freeze voting + for param in model.vgen.parameters(): + param.requires_grad = False + + # freeze detector + for param in model.proposal.parameters(): + param.requires_grad = False + + # to CUDA + model = model.cuda() + + return model + + +def get_num_params(model): + model_parameters = filter(lambda p: p.requires_grad, model.parameters()) + num_params = int(sum([np.prod(p.size()) for p in model_parameters])) + + return num_params + + +def get_solver(args, dataloader): + model = get_model(args) + optimizer = optim.Adam(model.parameters(), + lr=args.lr, + weight_decay=args.wd) + + if args.use_checkpoint: + print('loading checkpoint {}...'.format(args.use_checkpoint)) + stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + root = os.path.join(CONF.PATH.OUTPUT, stamp) + checkpoint = torch.load(args.use_checkpoint) + if args.use_checkpoint == 'ckpt/xyzrgb.pth': + checkpoint.pop('proposal.proposal.6.weight') + checkpoint.pop('proposal.proposal.6.bias') + checkpoint.pop('lang.lang_cls.0.weight') + checkpoint.pop('lang.lang_cls.0.bias') + model.load_state_dict(checkpoint, strict=False) + os.makedirs(root, exist_ok=True) + # optimizer.load_state_dict(checkpoint["optimizer_state_dict"]) + else: + stamp = datetime.now().strftime('%Y-%m-%d_%H-%M-%S') + if args.tag: stamp += '_' + args.tag.upper() + root = os.path.join(CONF.PATH.OUTPUT, stamp) + os.makedirs(root, exist_ok=True) + + # scheduler parameters for training solely the detection pipeline + LR_DECAY_STEP = [80, 120, 160] if args.no_reference else None + LR_DECAY_RATE = 0.1 if args.no_reference else None + BN_DECAY_STEP = 20 if args.no_reference else None + BN_DECAY_RATE = 0.5 if args.no_reference else None + + solver = Solver( + model=model, + config=DC, + dataloader=dataloader, + optimizer=optimizer, + stamp=stamp, + val_step=args.val_step, + detection=not args.no_detection, + reference=not args.no_reference, + use_lang_classifier=not args.no_lang_cls, + lr_decay_step=LR_DECAY_STEP, + lr_decay_rate=LR_DECAY_RATE, + bn_decay_step=BN_DECAY_STEP, + bn_decay_rate=BN_DECAY_RATE, + eval_only=args.eval_only, + ) + num_params = get_num_params(model) + + return solver, num_params, root + + +def save_info(args, root, num_params, train_dataset, val_dataset): + info = {} + for key, value in vars(args).items(): + info[key] = value + + info['num_train'] = len(train_dataset) + info['num_val'] = len(val_dataset) + + info['num_params'] = num_params + + with open(os.path.join(root, 'info.json'), 'w') as f: + json.dump(info, f, indent=4) + + +def get_scannet_scene_list(split): + scene_list = sorted([ + line.rstrip() for line in open( + 
os.path.join(CONF.PATH.SCANNET_META, 'scannetv2_{}.txt'.format( + split))) + ]) + # es_mod, for debugging + scene_list = scene_list[:len(scene_list) // 20] + print(len(scene_list)) + return scene_list + + +# def get_scanrefer(scanrefer_train, scanrefer_val, num_scenes): +# if args.no_reference: +# train_scene_list = get_scannet_scene_list("train") +# new_scanrefer_train = [] +# for scene_id in train_scene_list: +# data = deepcopy(SCANREFER_TRAIN[0]) +# data["scene_id"] = scene_id +# new_scanrefer_train.append(data) + +# val_scene_list = get_scannet_scene_list("val") +# new_scanrefer_val = [] +# for scene_id in val_scene_list: +# data = deepcopy(SCANREFER_VAL[0]) +# data["scene_id"] = scene_id +# new_scanrefer_val.append(data) +# else: +# # get initial scene list +# train_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer_train]))) +# val_scene_list = sorted(list(set([data["scene_id"] for data in scanrefer_val]))) +# if num_scenes == -1: +# num_scenes = len(train_scene_list) +# else: +# assert len(train_scene_list) >= num_scenes + +# # slice train_scene_list +# train_scene_list = train_scene_list[:num_scenes] + +# # filter data in chosen scenes +# new_scanrefer_train = [] +# for data in scanrefer_train: +# if data["scene_id"] in train_scene_list: +# new_scanrefer_train.append(data) + +# new_scanrefer_val = scanrefer_val + +# # all scanrefer scene +# all_scene_list = train_scene_list + val_scene_list + +# print("train on {} samples and val on {} samples".format(len(new_scanrefer_train), len(new_scanrefer_val))) +# print("the above might not be true for es experiments") +# return new_scanrefer_train, new_scanrefer_val, all_scene_list + + +def train(args): + # init training dataset + print('preparing data...') + # scanrefer_train, scanrefer_val, all_scene_list = get_scanrefer(SCANREFER_TRAIN, SCANREFER_VAL, args.num_scenes) + # scanrefer = { + # "train": scanrefer_train, + # "val": scanrefer_val + # } + + # dataloader + train_dataset, train_dataloader = get_dataloader(args, 'train', DC, False) + val_dataset, val_dataloader = get_dataloader(args, 'val', DC, False) + dataloader = {'train': train_dataloader, 'val': val_dataloader} + + print('initializing...') + solver, num_params, root = get_solver(args, dataloader) + + print('Start training...\n') + save_info(args, root, num_params, train_dataset, val_dataset) + solver(args.epoch, args.verbose) + + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + parser.add_argument('--tag', + type=str, + help='tag for the training, e.g. 
cuda_wl',
+                        default='')
+    parser.add_argument('--gpu', type=str, help='gpu', default='0')
+    parser.add_argument('--batch_size',
+                        type=int,
+                        help='batch size',
+                        default=32)
+    parser.add_argument('--epoch',
+                        type=int,
+                        help='number of epochs',
+                        default=50)
+    parser.add_argument('--verbose',
+                        type=int,
+                        help='iterations of showing verbose',
+                        default=10)
+    parser.add_argument('--val_step',
+                        type=int,
+                        help='iterations of validating',
+                        default=5000)
+    parser.add_argument('--lr', type=float, help='learning rate', default=1e-3)
+    parser.add_argument('--wd', type=float, help='weight decay', default=1e-5)
+    parser.add_argument('--num_points',
+                        type=int,
+                        default=40000,
+                        help='Point Number [default: 40000]')
+    parser.add_argument('--num_proposals',
+                        type=int,
+                        default=256,
+                        help='Proposal number [default: 256]')
+    parser.add_argument('--num_scenes',
+                        type=int,
+                        default=-1,
+                        help='Number of scenes [default: -1]')
+    parser.add_argument('--seed', type=int, default=42, help='random seed')
+    parser.add_argument('--no_height',
+                        action='store_true',
+                        help='Do NOT use height signal in input.')
+    parser.add_argument('--no_augment',
+                        action='store_true',
+                        help='Do NOT use data augmentation.')
+    parser.add_argument('--no_lang_cls',
+                        action='store_true',
+                        help='Do NOT use language classifier.')
+    parser.add_argument('--no_detection',
+                        action='store_true',
+                        help='Do NOT train the detection module.')
+    parser.add_argument('--no_reference',
+                        action='store_true',
+                        help='Do NOT train the localization module.')
+    parser.add_argument('--use_color',
+                        action='store_true',
+                        help='Use RGB color in input.')
+    parser.add_argument('--use_normal',
+                        action='store_true',
+                        help='Use normals in input.')
+    parser.add_argument('--use_multiview',
+                        action='store_true',
+                        help='Use multiview images.')
+    parser.add_argument('--use_bidir',
+                        action='store_true',
+                        help='Use bi-directional GRU.')
+    parser.add_argument(
+        '--use_pretrained',
+        type=str,
+        help=
+        'Specify the folder name containing the pretrained detection module.')
+    parser.add_argument('--use_checkpoint',
+                        type=str,
+                        help='Specify the checkpoint root',
+                        default='')
+    parser.add_argument('--eval_only',
+                        action='store_true',
+                        help='Only evaluate the model.')
+    args = parser.parse_args()
+
+    # setting
+    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+
+    # reproducibility
+    torch.manual_seed(args.seed)
+    torch.backends.cudnn.deterministic = True
+    torch.backends.cudnn.benchmark = False
+    np.random.seed(args.seed)
+
+    train(args)
diff --git a/models/Scanrefer/scripts/visualize.py b/models/Scanrefer/scripts/visualize.py
new file mode 100644
index 0000000..6e23c81
--- /dev/null
+++ b/models/Scanrefer/scripts/visualize.py
@@ -0,0 +1,553 @@
+import argparse
+import importlib
+import json
+import os
+import sys
+from datetime import datetime
+from shutil import copyfile
+
+import h5py
+import numpy as np
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+import torch.optim as optim
+from plyfile import PlyData, PlyElement
+from torch.utils.data import DataLoader
+from tqdm import tqdm
+
+sys.path.append(os.path.join(os.getcwd()))  # HACK add the root folder
+from data.scannet.model_util_scannet import ScannetDatasetConfig
+from lib.ap_helper import APCalculator, parse_groundtruths, parse_predictions
+from lib.config import CONF
+from lib.dataset import ScannetReferenceDataset
+from lib.eval_helper import get_eval
+from lib.loss_helper import get_loss
+from lib.solver import Solver
+from models.refnet import RefNet
+from
utils.box_util import box3d_iou, get_3d_box +from utils.pc_utils import write_oriented_bbox, write_ply_rgb + +# data +SCANNET_ROOT = '/path/to/ScanNet/public/v2/scans/' # TODO point this to your scannet data +SCANNET_MESH = os.path.join(SCANNET_ROOT, + '{}/{}_vh_clean_2.ply') # scene_id, scene_id +SCANNET_META = os.path.join(SCANNET_ROOT, '{}/{}.txt') # scene_id, scene_id +SCANREFER_TRAIN = json.load( + open(os.path.join(CONF.PATH.DATA, 'ScanRefer_filtered_train.json'))) +SCANREFER_VAL = json.load( + open(os.path.join(CONF.PATH.DATA, 'ScanRefer_filtered_val.json'))) + +# constants +MEAN_COLOR_RGB = np.array([109.8, 97.2, 83.8]) +DC = ScannetDatasetConfig() + + +def get_dataloader(args, scanrefer, all_scene_list, split, config, augment): + dataset = ScannetReferenceDataset(scanrefer=scanrefer, + scanrefer_all_scene=all_scene_list, + split=split, + num_points=args.num_points, + use_color=args.use_color, + use_height=(not args.no_height), + use_normal=args.use_normal, + use_multiview=args.use_multiview) + + dataloader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False) + + return dataset, dataloader + + +def get_model(args): + # load model + input_channels = int(args.use_multiview) * 128 + int( + args.use_normal) * 3 + int( + args.use_color) * 3 + int(not args.no_height) + model = RefNet(num_class=DC.num_class, + num_heading_bin=DC.num_heading_bin, + num_size_cluster=DC.num_size_cluster, + mean_size_arr=DC.mean_size_arr, + num_proposal=args.num_proposals, + input_feature_dim=input_channels).cuda() + + path = os.path.join(CONF.PATH.OUTPUT, args.folder, 'model.pth') + model.load_state_dict(torch.load(path), strict=False) + model.eval() + + return model + + +def get_scanrefer(args): + scanrefer = SCANREFER_TRAIN if args.use_train else SCANREFER_VAL + all_scene_list = sorted(list(set([data['scene_id'] + for data in scanrefer]))) + if args.scene_id: + assert args.scene_id in all_scene_list, 'The scene_id is not found' + scene_list = [args.scene_id] + else: + scene_list = sorted(list(set([data['scene_id'] + for data in scanrefer]))) + + scanrefer = [data for data in scanrefer if data['scene_id'] in scene_list] + + return scanrefer, scene_list + + +def write_ply(verts, colors, indices, output_file): + if colors is None: + colors = np.zeros_like(verts) + if indices is None: + indices = [] + + file = open(output_file, 'w') + file.write('ply \n') + file.write('format ascii 1.0\n') + file.write('element vertex {:d}\n'.format(len(verts))) + file.write('property float x\n') + file.write('property float y\n') + file.write('property float z\n') + file.write('property uchar red\n') + file.write('property uchar green\n') + file.write('property uchar blue\n') + file.write('element face {:d}\n'.format(len(indices))) + file.write('property list uchar uint vertex_indices\n') + file.write('end_header\n') + for vert, color in zip(verts, colors): + file.write('{:f} {:f} {:f} {:d} {:d} {:d}\n'.format( + vert[0], vert[1], vert[2], int(color[0] * 255), + int(color[1] * 255), int(color[2] * 255))) + for ind in indices: + file.write('3 {:d} {:d} {:d}\n'.format(ind[0], ind[1], ind[2])) + file.close() + + +def write_bbox(bbox, mode, output_file): + """ + bbox: (cx, cy, cz, lx, ly, lz, r), center and length in three axis, the last is the rotation + output_file: string + + """ + + def create_cylinder_mesh(radius, p0, p1, stacks=10, slices=10): + + import math + + def compute_length_vec3(vec3): + return math.sqrt(vec3[0] * vec3[0] + vec3[1] * vec3[1] + + vec3[2] * vec3[2]) + + def rotation(axis, angle): + rot = 
np.eye(4) + c = np.cos(-angle) + s = np.sin(-angle) + t = 1.0 - c + axis /= compute_length_vec3(axis) + x = axis[0] + y = axis[1] + z = axis[2] + rot[0, 0] = 1 + t * (x * x - 1) + rot[0, 1] = z * s + t * x * y + rot[0, 2] = -y * s + t * x * z + rot[1, 0] = -z * s + t * x * y + rot[1, 1] = 1 + t * (y * y - 1) + rot[1, 2] = x * s + t * y * z + rot[2, 0] = y * s + t * x * z + rot[2, 1] = -x * s + t * y * z + rot[2, 2] = 1 + t * (z * z - 1) + return rot + + verts = [] + indices = [] + diff = (p1 - p0).astype(np.float32) + height = compute_length_vec3(diff) + for i in range(stacks + 1): + for i2 in range(slices): + theta = i2 * 2.0 * math.pi / slices + pos = np.array([ + radius * math.cos(theta), radius * math.sin(theta), + height * i / stacks + ]) + verts.append(pos) + for i in range(stacks): + for i2 in range(slices): + i2p1 = math.fmod(i2 + 1, slices) + indices.append( + np.array([(i + 1) * slices + i2, i * slices + i2, + i * slices + i2p1], + dtype=np.uint32)) + indices.append( + np.array([(i + 1) * slices + i2, i * slices + i2p1, + (i + 1) * slices + i2p1], + dtype=np.uint32)) + transform = np.eye(4) + va = np.array([0, 0, 1], dtype=np.float32) + vb = diff + vb /= compute_length_vec3(vb) + axis = np.cross(vb, va) + angle = np.arccos(np.clip(np.dot(va, vb), -1, 1)) + if angle != 0: + if compute_length_vec3(axis) == 0: + dotx = va[0] + if (math.fabs(dotx) != 1.0): + axis = np.array([1, 0, 0]) - dotx * va + else: + axis = np.array([0, 1, 0]) - va[1] * va + axis /= compute_length_vec3(axis) + transform = rotation(axis, -angle) + transform[:3, 3] += p0 + verts = [ + np.dot(transform, np.array([v[0], v[1], v[2], 1.0])) for v in verts + ] + verts = [np.array([v[0], v[1], v[2]]) / v[3] for v in verts] + + return verts, indices + + def get_bbox_edges(bbox_min, bbox_max): + + def get_bbox_verts(bbox_min, bbox_max): + verts = [ + np.array([bbox_min[0], bbox_min[1], bbox_min[2]]), + np.array([bbox_max[0], bbox_min[1], bbox_min[2]]), + np.array([bbox_max[0], bbox_max[1], bbox_min[2]]), + np.array([bbox_min[0], bbox_max[1], bbox_min[2]]), + np.array([bbox_min[0], bbox_min[1], bbox_max[2]]), + np.array([bbox_max[0], bbox_min[1], bbox_max[2]]), + np.array([bbox_max[0], bbox_max[1], bbox_max[2]]), + np.array([bbox_min[0], bbox_max[1], bbox_max[2]]) + ] + return verts + + box_verts = get_bbox_verts(bbox_min, bbox_max) + edges = [(box_verts[0], box_verts[1]), (box_verts[1], box_verts[2]), + (box_verts[2], box_verts[3]), (box_verts[3], box_verts[0]), + (box_verts[4], box_verts[5]), (box_verts[5], box_verts[6]), + (box_verts[6], box_verts[7]), (box_verts[7], box_verts[4]), + (box_verts[0], box_verts[4]), (box_verts[1], box_verts[5]), + (box_verts[2], box_verts[6]), (box_verts[3], box_verts[7])] + return edges + + def get_bbox_corners(bbox): + centers, lengths = bbox[:3], bbox[3:6] + xmin, xmax = centers[0] - lengths[0] / 2, centers[0] + lengths[0] / 2 + ymin, ymax = centers[1] - lengths[1] / 2, centers[1] + lengths[1] / 2 + zmin, zmax = centers[2] - lengths[2] / 2, centers[2] + lengths[2] / 2 + corners = [] + corners.append(np.array([xmax, ymax, zmax]).reshape(1, 3)) + corners.append(np.array([xmax, ymax, zmin]).reshape(1, 3)) + corners.append(np.array([xmin, ymax, zmin]).reshape(1, 3)) + corners.append(np.array([xmin, ymax, zmax]).reshape(1, 3)) + corners.append(np.array([xmax, ymin, zmax]).reshape(1, 3)) + corners.append(np.array([xmax, ymin, zmin]).reshape(1, 3)) + corners.append(np.array([xmin, ymin, zmin]).reshape(1, 3)) + corners.append(np.array([xmin, ymin, zmax]).reshape(1, 3)) + corners = 
np.concatenate(corners, axis=0) # 8 x 3 + + return corners + + radius = 0.03 + offset = [0, 0, 0] + verts = [] + indices = [] + colors = [] + corners = get_bbox_corners(bbox) + + box_min = np.min(corners, axis=0) + box_max = np.max(corners, axis=0) + palette = { + 0: [0, 255, 0], # gt + 1: [0, 0, 255] # pred + } + chosen_color = palette[mode] + edges = get_bbox_edges(box_min, box_max) + for k in range(len(edges)): + cyl_verts, cyl_ind = create_cylinder_mesh(radius, edges[k][0], + edges[k][1]) + cur_num_verts = len(verts) + cyl_color = [[c / 255 for c in chosen_color] for _ in cyl_verts] + cyl_verts = [x + offset for x in cyl_verts] + cyl_ind = [x + cur_num_verts for x in cyl_ind] + verts.extend(cyl_verts) + indices.extend(cyl_ind) + colors.extend(cyl_color) + + write_ply(verts, colors, indices, output_file) + + +def read_mesh(filename): + """read XYZ for each vertex.""" + + assert os.path.isfile(filename) + with open(filename, 'rb') as f: + plydata = PlyData.read(f) + num_verts = plydata['vertex'].count + vertices = np.zeros(shape=[num_verts, 6], dtype=np.float32) + vertices[:, 0] = plydata['vertex'].data['x'] + vertices[:, 1] = plydata['vertex'].data['y'] + vertices[:, 2] = plydata['vertex'].data['z'] + vertices[:, 3] = plydata['vertex'].data['red'] + vertices[:, 4] = plydata['vertex'].data['green'] + vertices[:, 5] = plydata['vertex'].data['blue'] + + return vertices, plydata['face'] + + +def export_mesh(vertices, faces): + new_vertices = [] + for i in range(vertices.shape[0]): + new_vertices.append(( + vertices[i][0], + vertices[i][1], + vertices[i][2], + vertices[i][3], + vertices[i][4], + vertices[i][5], + )) + + vertices = np.array(new_vertices, + dtype=[('x', np.dtype('float32')), + ('y', np.dtype('float32')), + ('z', np.dtype('float32')), + ('red', np.dtype('uint8')), + ('green', np.dtype('uint8')), + ('blue', np.dtype('uint8'))]) + + vertices = PlyElement.describe(vertices, 'vertex') + + return PlyData([vertices, faces]) + + +def align_mesh(scene_id): + vertices, faces = read_mesh(SCANNET_MESH.format(scene_id, scene_id)) + for line in open(SCANNET_META.format(scene_id, scene_id)).readlines(): + if 'axisAlignment' in line: + axis_align_matrix = np.array([ + float(x) + for x in line.rstrip().strip('axisAlignment = ').split(' ') + ]).reshape((4, 4)) + break + + # align + pts = np.ones((vertices.shape[0], 4)) + pts[:, :3] = vertices[:, :3] + pts = np.dot(pts, axis_align_matrix.T) + vertices[:, :3] = pts[:, :3] + + mesh = export_mesh(vertices, faces) + + return mesh + + +def dump_results(args, scanrefer, data, config): + dump_dir = os.path.join(CONF.PATH.OUTPUT, args.folder, 'vis') + os.makedirs(dump_dir, exist_ok=True) + + # from inputs + ids = data['scan_idx'].detach().cpu().numpy() + point_clouds = data['point_clouds'].cpu().numpy() + batch_size = point_clouds.shape[0] + + pcl_color = data['pcl_color'].detach().cpu().numpy() + if args.use_color: + pcl_color = (pcl_color * 256 + MEAN_COLOR_RGB).astype(np.int64) + + # from network outputs + # detection + pred_objectness = torch.argmax(data['objectness_scores'], + 2).float().detach().cpu().numpy() + pred_center = data['center'].detach().cpu().numpy() # (B,K,3) + pred_heading_class = torch.argmax(data['heading_scores'], + -1) # B,num_proposal + pred_heading_residual = torch.gather( + data['heading_residuals'], 2, + pred_heading_class.unsqueeze(-1)) # B,num_proposal,1 + pred_heading_class = pred_heading_class.detach().cpu().numpy( + ) # B,num_proposal + pred_heading_residual = pred_heading_residual.squeeze( + 2).detach().cpu().numpy() # 
B,num_proposal + pred_size_class = torch.argmax(data['size_scores'], -1) # B,num_proposal + pred_size_residual = torch.gather( + data['size_residuals'], 2, + pred_size_class.unsqueeze(-1).unsqueeze(-1).repeat( + 1, 1, 1, 3)) # B,num_proposal,1,3 + pred_size_residual = pred_size_residual.squeeze( + 2).detach().cpu().numpy() # B,num_proposal,3 + # reference + pred_ref_scores = data['cluster_ref'].detach().cpu().numpy() + pred_ref_scores_softmax = F.softmax( + data['cluster_ref'] * + torch.argmax(data['objectness_scores'], 2).float() * data['pred_mask'], + dim=1).detach().cpu().numpy() + # post-processing + nms_masks = data['pred_mask'].detach().cpu().numpy() # B,num_proposal + + # ground truth + gt_center = data['center_label'].cpu().numpy() # (B,MAX_NUM_OBJ,3) + gt_heading_class = data['heading_class_label'].cpu().numpy() # B,K2 + gt_heading_residual = data['heading_residual_label'].cpu().numpy() # B,K2 + gt_size_class = data['size_class_label'].cpu().numpy() # B,K2 + gt_size_residual = data['size_residual_label'].cpu().numpy() # B,K2,3 + # reference + gt_ref_labels = data['ref_box_label'].detach().cpu().numpy() + + for i in range(batch_size): + # basic info + idx = ids[i] + scene_id = scanrefer[idx]['scene_id'] + object_id = scanrefer[idx]['object_id'] + object_name = scanrefer[idx]['object_name'] + ann_id = scanrefer[idx]['ann_id'] + + # scene_output + scene_dump_dir = os.path.join(dump_dir, scene_id) + if not os.path.exists(scene_dump_dir): + os.mkdir(scene_dump_dir) + + # # Dump the original scene point clouds + mesh = align_mesh(scene_id) + mesh.write(os.path.join(scene_dump_dir, 'mesh.ply')) + + write_ply_rgb(point_clouds[i], pcl_color[i], + os.path.join(scene_dump_dir, 'pc.ply')) + + # filter out the valid ground truth reference box + assert gt_ref_labels[i].shape[0] == gt_center[i].shape[0] + gt_ref_idx = np.argmax(gt_ref_labels[i], 0) + + # visualize the gt reference box + # NOTE: for each object there should be only one gt reference box + object_dump_dir = os.path.join( + dump_dir, scene_id, 'gt_{}_{}.ply'.format(object_id, object_name)) + gt_obb = config.param2obb(gt_center[i, gt_ref_idx, + 0:3], gt_heading_class[i, + gt_ref_idx], + gt_heading_residual[i, gt_ref_idx], + gt_size_class[i, gt_ref_idx], + gt_size_residual[i, gt_ref_idx]) + gt_bbox = get_3d_box(gt_obb[3:6], gt_obb[6], gt_obb[0:3]) + + if not os.path.exists(object_dump_dir): + write_bbox( + gt_obb, 0, + os.path.join(scene_dump_dir, + 'gt_{}_{}.ply'.format(object_id, object_name))) + + # find the valid reference prediction + pred_masks = nms_masks[i] * pred_objectness[i] == 1 + assert pred_ref_scores[i].shape[0] == pred_center[i].shape[0] + pred_ref_idx = np.argmax(pred_ref_scores[i] * pred_masks, 0) + assigned_gt = torch.gather( + data['ref_box_label'], 1, + data['object_assignment']).detach().cpu().numpy() + + # visualize the predicted reference box + pred_obb = config.param2obb(pred_center[i, pred_ref_idx, 0:3], + pred_heading_class[i, pred_ref_idx], + pred_heading_residual[i, pred_ref_idx], + pred_size_class[i, pred_ref_idx], + pred_size_residual[i, pred_ref_idx]) + pred_bbox = get_3d_box(pred_obb[3:6], pred_obb[6], pred_obb[0:3]) + iou = box3d_iou(gt_bbox, pred_bbox) + + write_bbox( + pred_obb, 1, + os.path.join( + scene_dump_dir, 'pred_{}_{}_{}_{:.5f}_{:.5f}.ply'.format( + object_id, object_name, ann_id, + pred_ref_scores_softmax[i, pred_ref_idx], iou))) + + +def visualize(args): + # init training dataset + print('preparing data...') + scanrefer, scene_list = get_scanrefer(args) + + # dataloader + _, dataloader = 
get_dataloader(args, scanrefer, scene_list, 'val', DC,
+                                   False)
+
+    # model
+    model = get_model(args)
+
+    # config
+    POST_DICT = {
+        'remove_empty_box': True,
+        'use_3d_nms': True,
+        'nms_iou': 0.25,
+        'use_old_type_nms': False,
+        'cls_nms': True,
+        'per_class_proposal': True,
+        'conf_thresh': 0.05,
+        'dataset_config': DC
+    } if not args.no_nms else None
+
+    # evaluate
+    print('visualizing...')
+    for data in tqdm(dataloader):
+        for key in data:
+            data[key] = data[key].cuda()
+
+        # feed
+        data = model(data)
+        # _, data = get_loss(data, DC, True, True, POST_DICT)
+        _, data = get_loss(data_dict=data,
+                           config=DC,
+                           detection=True,
+                           reference=True)
+        data = get_eval(data_dict=data,
+                        config=DC,
+                        reference=True,
+                        post_processing=POST_DICT)
+
+        # visualize
+        dump_results(args, scanrefer, data, DC)
+
+    print('done!')
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument('--folder',
+                        type=str,
+                        help='Folder containing the model',
+                        required=True)
+    parser.add_argument('--gpu', type=str, help='gpu', default='0')
+    parser.add_argument('--scene_id', type=str, help='scene id', default='')
+    parser.add_argument('--batch_size', type=int, help='batch size', default=8)
+    parser.add_argument('--num_points',
+                        type=int,
+                        default=40000,
+                        help='Point Number [default: 40000]')
+    parser.add_argument('--num_proposals',
+                        type=int,
+                        default=256,
+                        help='Proposal number [default: 256]')
+    parser.add_argument('--num_scenes',
+                        type=int,
+                        default=-1,
+                        help='Number of scenes [default: -1]')
+    parser.add_argument('--no_height',
+                        action='store_true',
+                        help='Do NOT use height signal in input.')
+    parser.add_argument(
+        '--no_nms',
+        action='store_true',
+        help='Do NOT use non-maximum suppression for post-processing.')
+    parser.add_argument('--use_train',
+                        action='store_true',
+                        help='Use the training set.')
+    parser.add_argument('--use_color',
+                        action='store_true',
+                        help='Use RGB color in input.')
+    parser.add_argument('--use_normal',
+                        action='store_true',
+                        help='Use normals in input.')
+    parser.add_argument('--use_multiview',
+                        action='store_true',
+                        help='Use multiview images.')
+    args = parser.parse_args()
+
+    # setting
+    os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
+    os.environ['CUDA_LAUNCH_BLOCKING'] = '1'
+
+    visualize(args)
diff --git a/models/Scanrefer/train.sh b/models/Scanrefer/train.sh
new file mode 100644
index 0000000..298deb4
--- /dev/null
+++ b/models/Scanrefer/train.sh
@@ -0,0 +1 @@
+python -u scripts/train.py --use_color
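train.sh above wraps the training entry point; the visualization script is launched the same way. A hypothetical invocation (the timestamped folder name is illustrative and must name a trained run under CONF.PATH.OUTPUT; input flags such as --use_color must match the ones used at training time):

python -u scripts/visualize.py --folder 2024-01-01_00-00-00 --scene_id scene0011_00 --use_color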
diff --git a/models/Scanrefer/utils/box_util.py b/models/Scanrefer/utils/box_util.py
new file mode 100644
index 0000000..cac3c69
--- /dev/null
+++ b/models/Scanrefer/utils/box_util.py
@@ -0,0 +1,356 @@
+"""
+Helper functions for calculating 2D and 3D bounding box IoU.
+From: https://github.com/facebookresearch/votenet/blob/master/utils/box_util.py
+
+Collected and written by Charles R. Qi
+Last modified: Jul 2019
+"""
+
+from __future__ import print_function
+
+import numpy as np
+from scipy.spatial import ConvexHull
+
+
+def polygon_clip(subjectPolygon, clipPolygon):
+    """Clip a polygon with another polygon.
+
+    Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python
+
+    Args:
+        subjectPolygon: a list of (x,y) 2d points, any polygon.
+        clipPolygon: a list of (x,y) 2d points, has to be *convex*
+    Note:
+        **points have to be counter-clockwise ordered**
+
+    Return:
+        a list of (x,y) vertex point for the intersection polygon.
+    """
+
+    def inside(p):
+        return (cp2[0] - cp1[0]) * (p[1] - cp1[1]) > (cp2[1] -
+                                                      cp1[1]) * (p[0] - cp1[0])
+
+    def computeIntersection():
+        dc = [cp1[0] - cp2[0], cp1[1] - cp2[1]]
+        dp = [s[0] - e[0], s[1] - e[1]]
+        n1 = cp1[0] * cp2[1] - cp1[1] * cp2[0]
+        n2 = s[0] * e[1] - s[1] * e[0]
+        n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
+        return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
+
+    outputList = subjectPolygon
+    cp1 = clipPolygon[-1]
+
+    for clipVertex in clipPolygon:
+        cp2 = clipVertex
+        inputList = outputList
+        outputList = []
+        s = inputList[-1]
+
+        for subjectVertex in inputList:
+            e = subjectVertex
+            if inside(e):
+                if not inside(s):
+                    outputList.append(computeIntersection())
+                outputList.append(e)
+            elif inside(s):
+                outputList.append(computeIntersection())
+            s = e
+        cp1 = cp2
+        if len(outputList) == 0:
+            return None
+    return (outputList)
+
+
+def poly_area(x, y):
+    """ Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
+    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
+
+
+def poly_area_batch(x, y):
+    """ Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """
+    return 0.5 * np.abs(np.matmul(np.expand_dims(x, axis=1), np.roll(np.expand_dims(y, axis=2), 1, axis=1)) \
+        - np.matmul(np.expand_dims(y, axis=1), np.roll(np.expand_dims(x, axis=2), 1, axis=1))).squeeze(axis=(1,2))
+
+
+def convex_hull_intersection(p1, p2):
+    """Compute area of two convex hull's intersection area.
+
+    p1,p2 are a list of (x,y) tuples of hull vertices. return a list of (x,y)
+    for the intersection and its volume
+    """
+    inter_p = polygon_clip(p1, p2)
+    if inter_p is not None:
+        hull_inter = ConvexHull(inter_p)
+        return inter_p, hull_inter.volume
+    else:
+        return None, 0.0
+
+
+def box3d_vol(corners):
+    ''' corners: (8,3) no assumption on axis direction '''
+    a = np.sqrt(np.sum((corners[0, :] - corners[1, :])**2))
+    b = np.sqrt(np.sum((corners[1, :] - corners[2, :])**2))
+    c = np.sqrt(np.sum((corners[0, :] - corners[4, :])**2))
+    return a * b * c
+
+
+def is_clockwise(p):
+    x = p[:, 0]
+    y = p[:, 1]
+    return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0
+
+
+def box3d_iou(corners1, corners2):
+    """Compute 3D bounding box IoU.
+ + Input: + corners1: numpy array (8,3), assume up direction is Z + corners2: numpy array (8,3), assume up direction is Z + Output: + iou: 3D bounding box IoU + """ + # # corner points are in counter clockwise order + # rect1 = [(corners1[i,0], corners1[i,2]) for i in range(3,-1,-1)] + # rect2 = [(corners2[i,0], corners2[i,2]) for i in range(3,-1,-1)] + # area1 = poly_area(np.array(rect1)[:,0], np.array(rect1)[:,1]) + # area2 = poly_area(np.array(rect2)[:,0], np.array(rect2)[:,1]) + # inter, inter_area = convex_hull_intersection(rect1, rect2) + # iou_2d = inter_area/(area1+area2-inter_area) + # ymax = min(corners1[0,1], corners2[0,1]) + # ymin = max(corners1[4,1], corners2[4,1]) + # inter_vol = inter_area * max(0.0, ymax-ymin) + # vol1 = box3d_vol(corners1) + # vol2 = box3d_vol(corners2) + # iou = inter_vol / (vol1 + vol2 - inter_vol) + # return iou, iou_2d + + x_min_1, x_max_1, y_min_1, y_max_1, z_min_1, z_max_1 = get_box3d_min_max( + corners1) + x_min_2, x_max_2, y_min_2, y_max_2, z_min_2, z_max_2 = get_box3d_min_max( + corners2) + xA = np.maximum(x_min_1, x_min_2) + yA = np.maximum(y_min_1, y_min_2) + zA = np.maximum(z_min_1, z_min_2) + xB = np.minimum(x_max_1, x_max_2) + yB = np.minimum(y_max_1, y_max_2) + zB = np.minimum(z_max_1, z_max_2) + inter_vol = np.maximum((xB - xA), 0) * np.maximum( + (yB - yA), 0) * np.maximum((zB - zA), 0) + box_vol_1 = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) * (z_max_1 - z_min_1) + box_vol_2 = (x_max_2 - x_min_2) * (y_max_2 - y_min_2) * (z_max_2 - z_min_2) + iou = inter_vol / (box_vol_1 + box_vol_2 - inter_vol + 1e-8) + + return iou + + +def get_box3d_min_max(corner): + ''' Compute min and max coordinates for 3D bounding box + Note: only for axis-aligned bounding boxes + + Input: + corners: numpy array (8,3), assume up direction is Z (batch of N samples) + Output: + box_min_max: an array for min and max coordinates of 3D bounding box IoU + + ''' + + min_coord = corner.min(axis=0) + max_coord = corner.max(axis=0) + x_min, x_max = min_coord[0], max_coord[0] + y_min, y_max = min_coord[1], max_coord[1] + z_min, z_max = min_coord[2], max_coord[2] + + return x_min, x_max, y_min, y_max, z_min, z_max + + +def box3d_iou_batch(corners1, corners2): + ''' Compute 3D bounding box IoU. 
+ Note: only for axis-aligned bounding boxes + + Input: + corners1: numpy array (N,8,3), assume up direction is Z (batch of N samples) + corners2: numpy array (N,8,3), assume up direction is Z (batch of N samples) + Output: + iou: an array of 3D bounding box IoU + + ''' + + x_min_1, x_max_1, y_min_1, y_max_1, z_min_1, z_max_1 = get_box3d_min_max_batch( + corners1) + x_min_2, x_max_2, y_min_2, y_max_2, z_min_2, z_max_2 = get_box3d_min_max_batch( + corners2) + xA = np.maximum(x_min_1, x_min_2) + yA = np.maximum(y_min_1, y_min_2) + zA = np.maximum(z_min_1, z_min_2) + xB = np.minimum(x_max_1, x_max_2) + yB = np.minimum(y_max_1, y_max_2) + zB = np.minimum(z_max_1, z_max_2) + inter_vol = np.maximum((xB - xA), 0) * np.maximum( + (yB - yA), 0) * np.maximum((zB - zA), 0) + box_vol_1 = (x_max_1 - x_min_1) * (y_max_1 - y_min_1) * (z_max_1 - z_min_1) + box_vol_2 = (x_max_2 - x_min_2) * (y_max_2 - y_min_2) * (z_max_2 - z_min_2) + iou = inter_vol / (box_vol_1 + box_vol_2 - inter_vol + 1e-8) + + return iou + + +def get_box3d_min_max_batch(corner): + ''' Compute min and max coordinates for 3D bounding box + Note: only for axis-aligned bounding boxes + + Input: + corners: numpy array (N,8,3), assume up direction is Z (batch of N samples) + Output: + box_min_max: an array for min and max coordinates of 3D bounding box IoU + + ''' + + min_coord = corner.min(axis=1) + max_coord = corner.max(axis=1) + x_min, x_max = min_coord[:, 0], max_coord[:, 0] + y_min, y_max = min_coord[:, 1], max_coord[:, 1] + z_min, z_max = min_coord[:, 2], max_coord[:, 2] + + return x_min, x_max, y_min, y_max, z_min, z_max + + +def get_iou(bb1, bb2): + """Calculate the Intersection over Union (IoU) of two 2D bounding boxes. + + Parameters + ---------- + bb1 : dict + Keys: {'x1', 'x2', 'y1', 'y2'} + The (x1, y1) position is at the top left corner, + the (x2, y2) position is at the bottom right corner + bb2 : dict + Keys: {'x1', 'x2', 'y1', 'y2'} + The (x, y) position is at the top left corner, + the (x2, y2) position is at the bottom right corner + + Returns + ------- + float + in [0, 1] + """ + assert bb1['x1'] < bb1['x2'] + assert bb1['y1'] < bb1['y2'] + assert bb2['x1'] < bb2['x2'] + assert bb2['y1'] < bb2['y2'] + + # determine the coordinates of the intersection rectangle + x_left = max(bb1['x1'], bb2['x1']) + y_top = max(bb1['y1'], bb2['y1']) + x_right = min(bb1['x2'], bb2['x2']) + y_bottom = min(bb1['y2'], bb2['y2']) + + if x_right < x_left or y_bottom < y_top: + return 0.0 + + # The intersection of two axis-aligned bounding boxes is always an + # axis-aligned bounding box + intersection_area = (x_right - x_left) * (y_bottom - y_top) + + # compute the area of both AABBs + bb1_area = (bb1['x2'] - bb1['x1']) * (bb1['y2'] - bb1['y1']) + bb2_area = (bb2['x2'] - bb2['x1']) * (bb2['y2'] - bb2['y1']) + + # compute the intersection over union by taking the intersection + # area and dividing it by the sum of prediction + ground-truth + # areas - the interesection area + iou = intersection_area / float(bb1_area + bb2_area - intersection_area) + assert iou >= 0.0 + assert iou <= 1.0 + return iou + + +def box2d_iou(box1, box2): + """Compute 2D bounding box IoU. 
+
+    Input:
+        box1: tuple of (xmin,ymin,xmax,ymax)
+        box2: tuple of (xmin,ymin,xmax,ymax)
+    Output:
+        iou: 2D IoU scalar
+    """
+    return get_iou({'x1':box1[0], 'y1':box1[1], 'x2':box1[2], 'y2':box1[3]}, \
+        {'x1':box2[0], 'y1':box2[1], 'x2':box2[2], 'y2':box2[3]})
+
+
+# -----------------------------------------------------------
+# Convert from box parameters to corners
+# -----------------------------------------------------------
+def roty(t):
+    """Rotation about the y-axis."""
+    c = np.cos(t)
+    s = np.sin(t)
+    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
+
+
+def roty_batch(t):
+    """Rotation about the y-axis.
+
+    t: (x1,x2,...xn)
+    return: (x1,x2,...,xn,3,3)
+    """
+    input_shape = t.shape
+    output = np.zeros(tuple(list(input_shape) + [3, 3]))
+    c = np.cos(t)
+    s = np.sin(t)
+    output[..., 0, 0] = c
+    output[..., 0, 2] = s
+    output[..., 1, 1] = 1
+    output[..., 2, 0] = -s
+    output[..., 2, 2] = c
+    return output
+
+
+def get_3d_box(box_size, heading_angle, center):
+    """box_size is array(l,w,h), heading_angle is radians clockwise from pos x
+    axis, center is xyz of box center; output is an (8,3) array of 3D box
+    corners. Similar to utils/compute_orientation_3d."""
+    R = roty(heading_angle)
+    l, w, h = box_size
+    # x_corners = [l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2]
+    # y_corners = [h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2]
+    # z_corners = [w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2]
+    x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
+    y_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
+    z_corners = [h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2]
+    corners_3d = np.dot(R, np.vstack([x_corners, y_corners, z_corners]))
+    corners_3d[0, :] = corners_3d[0, :] + center[0]
+    corners_3d[1, :] = corners_3d[1, :] + center[1]
+    corners_3d[2, :] = corners_3d[2, :] + center[2]
+    corners_3d = np.transpose(corners_3d)
+    return corners_3d
+
+
+def get_3d_box_batch(box_size, heading_angle, center):
+    ''' box_size: [x1,x2,...,xn,3]
+        heading_angle: [x1,x2,...,xn]
+        center: [x1,x2,...,xn,3]
+    Return:
+        [x1,x2,...,xn,8,3]
+    '''
+    input_shape = heading_angle.shape
+    R = roty_batch(heading_angle)
+    l = np.expand_dims(box_size[..., 0], -1)  # [x1,...,xn,1]
+    w = np.expand_dims(box_size[..., 1], -1)
+    h = np.expand_dims(box_size[..., 2], -1)
+    corners_3d = np.zeros(tuple(list(input_shape) + [8, 3]))
+    # corners_3d[...,:,0] = np.concatenate((l/2,l/2,-l/2,-l/2,l/2,l/2,-l/2,-l/2), -1)
+    # corners_3d[...,:,1] = np.concatenate((h/2,h/2,h/2,h/2,-h/2,-h/2,-h/2,-h/2), -1)
+    # corners_3d[...,:,2] = np.concatenate((w/2,-w/2,-w/2,w/2,w/2,-w/2,-w/2,w/2), -1)
+    corners_3d[..., :, 0] = np.concatenate(
+        (l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2), -1)
+    corners_3d[..., :, 1] = np.concatenate(
+        (w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2), -1)
+    corners_3d[..., :, 2] = np.concatenate(
+        (h / 2, h / 2, h / 2, h / 2, -h / 2, -h / 2, -h / 2, -h / 2), -1)
+    tlist = [i for i in range(len(input_shape))]
+    tlist += [len(input_shape) + 1, len(input_shape)]
+    corners_3d = np.matmul(corners_3d, np.transpose(R, tuple(tlist)))
+    corners_3d += np.expand_dims(center, -2)
+    return corners_3d
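box_util.py closes with the parameter-to-corner converters; together with box3d_iou above they form the whole box-evaluation path: get_3d_box turns (size, heading, center) parameters into an (8,3) corner array, and box3d_iou reduces two corner arrays to an axis-aligned IoU. A minimal sanity-check sketch, assuming models/Scanrefer is the working directory so that utils.box_util resolves:

import numpy as np
from utils.box_util import box3d_iou, get_3d_box

# two axis-aligned unit cubes (heading 0) whose centers differ by 0.5 along x
corners_a = get_3d_box(np.array([1.0, 1.0, 1.0]), 0, np.array([0.0, 0.0, 0.0]))
corners_b = get_3d_box(np.array([1.0, 1.0, 1.0]), 0, np.array([0.5, 0.0, 0.0]))
print(box3d_iou(corners_a, corners_b))  # intersection 0.5 / union 1.5 ~= 0.333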
diff --git a/models/Scanrefer/utils/eta.py b/models/Scanrefer/utils/eta.py
new file mode 100644
index 0000000..d32596c
--- /dev/null
+++ b/models/Scanrefer/utils/eta.py
@@ -0,0 +1,36 @@
+'''
+File Created: Monday, 25th November 2019 1:35:30 pm
+Author: Dave Zhenyu Chen (zhenyu.chen@tum.de)
+'''
+
+
+def get_eta(start, end, extra, num_left):
+    exe_s = end - start
+    eta_s = (exe_s + extra) * num_left
+    eta = {'h': 0, 'm': 0, 's': 0}
+    if eta_s < 60:
+        eta['s'] = int(eta_s)
+    elif eta_s >= 60 and eta_s < 3600:
+        eta['m'] = int(eta_s / 60)
+        eta['s'] = int(eta_s % 60)
+    else:
+        eta['h'] = int(eta_s / (60 * 60))
+        eta['m'] = int(eta_s % (60 * 60) / 60)
+        eta['s'] = int(eta_s % (60 * 60) % 60)
+
+    return eta
+
+
+def decode_eta(eta_sec):
+    eta = {'h': 0, 'm': 0, 's': 0}
+    if eta_sec < 60:
+        eta['s'] = int(eta_sec)
+    elif eta_sec >= 60 and eta_sec < 3600:
+        eta['m'] = int(eta_sec / 60)
+        eta['s'] = int(eta_sec % 60)
+    else:
+        eta['h'] = int(eta_sec / (60 * 60))
+        eta['m'] = int(eta_sec % (60 * 60) / 60)
+        eta['s'] = int(eta_sec % (60 * 60) % 60)
+
+    return eta
diff --git a/models/Scanrefer/utils/eval_det.py b/models/Scanrefer/utils/eval_det.py
new file mode 100644
index 0000000..c701a48
--- /dev/null
+++ b/models/Scanrefer/utils/eval_det.py
@@ -0,0 +1,290 @@
+"""
+Generic Code for Object Detection Evaluation
+From: https://github.com/facebookresearch/votenet/blob/master/utils/eval_det.py
+
+Input:
+    For each class:
+        For each image:
+            Predictions: box, score
+            Groundtruths: box
+
+Output:
+    For each class:
+        precision-recall and average precision
+
+Author: Charles R. Qi
+
+Ref: https://raw.githubusercontent.com/rbgirshick/py-faster-rcnn/master/lib/datasets/voc_eval.py
+"""
+import numpy as np
+
+
+def voc_ap(rec, prec, use_07_metric=False):
+    """ ap = voc_ap(rec, prec, [use_07_metric])
+    Compute VOC AP given precision and recall.
+    If use_07_metric is true, uses the
+    VOC 07 11 point method (default:False).
+    """
+    if use_07_metric:
+        # 11 point metric
+        ap = 0.
+        for t in np.arange(0., 1.1, 0.1):
+            if np.sum(rec >= t) == 0:
+                p = 0
+            else:
+                p = np.max(prec[rec >= t])
+            ap = ap + p / 11.
+    else:
+        # correct AP calculation
+        # first append sentinel values at the end
+        mrec = np.concatenate(([0.], rec, [1.]))
+        mpre = np.concatenate(([0.], prec, [0.]))
+
+        # compute the precision envelope
+        for i in range(mpre.size - 1, 0, -1):
+            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+        # to calculate area under PR curve, look for points
+        # where X axis (recall) changes value
+        i = np.where(mrec[1:] != mrec[:-1])[0]
+
+        # and sum (\Delta recall) * prec
+        ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+    return ap
+
+
+import os
+import sys
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+from utils.metric_util import calc_iou  # axis-aligned 3D box IoU
+
+
+def get_iou(bb1, bb2):
+    """Compute IoU of two bounding boxes.
+
+    ** Define your box IoU function HERE **
+    """
+    iou3d = calc_iou(bb1, bb2)
+    return iou3d
+
+
+from utils.box_util import box3d_iou
+
+
+def get_iou_obb(bb1, bb2):
+    iou3d = box3d_iou(bb1, bb2)
+    return iou3d
+
+
+def get_iou_main(get_iou_func, args):
+    return get_iou_func(*args)
+
+
+def eval_det_cls(pred,
+                 gt,
+                 ovthresh=0.25,
+                 use_07_metric=False,
+                 get_iou_func=get_iou):
+    """Generic functions to compute precision/recall for object detection for a
+    single class.
+ + Input: + pred: map of {img_id: [(bbox, score)]} where bbox is numpy array + gt: map of {img_id: [bbox]} + ovthresh: scalar, iou threshold + use_07_metric: bool, if True use VOC07 11 point method + Output: + rec: numpy array of length nd + prec: numpy array of length nd + ap: scalar, average precision + """ + + # construct gt objects + class_recs = {} # {img_id: {'bbox': bbox list, 'det': matched list}} + npos = 0 + for img_id in gt.keys(): + bbox = np.array(gt[img_id]) + det = [False] * len(bbox) + npos += len(bbox) + class_recs[img_id] = {'bbox': bbox, 'det': det} + # pad empty list to all other imgids + for img_id in pred.keys(): + if img_id not in gt: + class_recs[img_id] = {'bbox': np.array([]), 'det': []} + + # construct dets + image_ids = [] + confidence = [] + BB = [] + for img_id in pred.keys(): + for box, score in pred[img_id]: + image_ids.append(img_id) + confidence.append(score) + BB.append(box) + confidence = np.array(confidence) + BB = np.array(BB) # (nd,4 or 8,3 or 6) + + # sort by confidence + sorted_ind = np.argsort(-confidence) + sorted_scores = np.sort(-confidence) + BB = BB[sorted_ind, ...] + image_ids = [image_ids[x] for x in sorted_ind] + + # go down dets and mark TPs and FPs + nd = len(image_ids) + tp = np.zeros(nd) + fp = np.zeros(nd) + for d in range(nd): + #if d%100==0: print(d) + R = class_recs[image_ids[d]] + bb = BB[d, ...].astype(float) + ovmax = -np.inf + BBGT = R['bbox'].astype(float) + + if BBGT.size > 0: + # compute overlaps + for j in range(BBGT.shape[0]): + iou = get_iou_main(get_iou_func, (bb, BBGT[j, ...])) + if iou > ovmax: + ovmax = iou + jmax = j + + #print d, ovmax + if ovmax > ovthresh: + if not R['det'][jmax]: + tp[d] = 1. + R['det'][jmax] = 1 + else: + fp[d] = 1. + else: + fp[d] = 1. + + # compute precision recall + fp = np.cumsum(fp) + tp = np.cumsum(tp) + rec = tp / float(npos + 1e-8) + #print('NPOS: ', npos) + # avoid divide by zero in case the first detection matches a difficult + # ground truth + prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps) + ap = voc_ap(rec, prec, use_07_metric) + + return rec, prec, ap + + +def eval_det_cls_wrapper(arguments): + pred, gt, ovthresh, use_07_metric, get_iou_func = arguments + rec, prec, ap = eval_det_cls(pred, gt, ovthresh, use_07_metric, + get_iou_func) + return (rec, prec, ap) + + +def eval_det(pred_all, + gt_all, + ovthresh=0.25, + use_07_metric=False, + get_iou_func=get_iou): + """Generic functions to compute precision/recall for object detection for + multiple classes. 
+ + Input: + pred_all: map of {img_id: [(classname, bbox, score)]} + gt_all: map of {img_id: [(classname, bbox)]} + ovthresh: scalar, iou threshold + use_07_metric: bool, if true use VOC07 11 point method + Output: + rec: {classname: rec} + prec: {classname: prec_all} + ap: {classname: scalar} + """ + pred = {} # map {classname: pred} + gt = {} # map {classname: gt} + for img_id in pred_all.keys(): + for classname, bbox, score in pred_all[img_id]: + if classname not in pred: pred[classname] = {} + if img_id not in pred[classname]: + pred[classname][img_id] = [] + if classname not in gt: gt[classname] = {} + if img_id not in gt[classname]: + gt[classname][img_id] = [] + pred[classname][img_id].append((bbox, score)) + for img_id in gt_all.keys(): + for classname, bbox in gt_all[img_id]: + if classname not in gt: gt[classname] = {} + if img_id not in gt[classname]: + gt[classname][img_id] = [] + gt[classname][img_id].append(bbox) + + rec = {} + prec = {} + ap = {} + for classname in gt.keys(): + print('Computing AP for class: ', classname) + rec[classname], prec[classname], ap[classname] = eval_det_cls( + pred[classname], gt[classname], ovthresh, use_07_metric, + get_iou_func) + print(classname, ap[classname]) + + return rec, prec, ap + + +from multiprocessing import Pool + + +def eval_det_multiprocessing(pred_all, + gt_all, + ovthresh=0.25, + use_07_metric=False, + get_iou_func=get_iou): + """Generic functions to compute precision/recall for object detection for + multiple classes. + + Input: + pred_all: map of {img_id: [(classname, bbox, score)]} + gt_all: map of {img_id: [(classname, bbox)]} + ovthresh: scalar, iou threshold + use_07_metric: bool, if true use VOC07 11 point method + Output: + rec: {classname: rec} + prec: {classname: prec_all} + ap: {classname: scalar} + """ + pred = {} # map {classname: pred} + gt = {} # map {classname: gt} + for img_id in pred_all.keys(): + for classname, bbox, score in pred_all[img_id]: + if classname not in pred: pred[classname] = {} + if img_id not in pred[classname]: + pred[classname][img_id] = [] + if classname not in gt: gt[classname] = {} + if img_id not in gt[classname]: + gt[classname][img_id] = [] + pred[classname][img_id].append((bbox, score)) + for img_id in gt_all.keys(): + for classname, bbox in gt_all[img_id]: + if classname not in gt: gt[classname] = {} + if img_id not in gt[classname]: + gt[classname][img_id] = [] + gt[classname][img_id].append(bbox) + + rec = {} + prec = {} + ap = {} + p = Pool(processes=10) + ret_values = p.map(eval_det_cls_wrapper, [ + (pred[classname], gt[classname], ovthresh, use_07_metric, get_iou_func) + for classname in gt.keys() if classname in pred + ]) + p.close() + for i, classname in enumerate(gt.keys()): + if classname in pred: + rec[classname], prec[classname], ap[classname] = ret_values[i] + else: + rec[classname] = 0 + prec[classname] = 0 + ap[classname] = 0 + print(classname, ap[classname]) + + return rec, prec, ap diff --git a/models/Scanrefer/utils/metric_util.py b/models/Scanrefer/utils/metric_util.py new file mode 100644 index 0000000..2e783cc --- /dev/null +++ b/models/Scanrefer/utils/metric_util.py @@ -0,0 +1,185 @@ +""" +Utility functions for metric evaluation. +From: https://github.com/facebookresearch/votenet/blob/master/utils/metric_util.py + +Author: Or Litany and Charles R. 
Qi +""" + +import os +import sys + +import torch + +BASE_DIR = os.path.dirname(os.path.abspath(__file__)) +sys.path.append(BASE_DIR) + +import numpy as np +# Mesh IO +import trimesh + +# ---------------------------------------- +# Precision and Recall +# ---------------------------------------- + + +def multi_scene_precision_recall(labels, + pred, + iou_thresh, + conf_thresh, + label_mask, + pred_mask=None): + ''' + Args: + labels: (B, N, 6) + pred: (B, M, 6) + iou_thresh: scalar + conf_thresh: scalar + label_mask: (B, N,) with values in 0 or 1 to indicate which GT boxes to consider. + pred_mask: (B, M,) with values in 0 or 1 to indicate which PRED boxes to consider. + Returns: + TP,FP,FN,Precision,Recall + ''' + # Make sure the masks are not Torch tensor, otherwise the mask==1 returns uint8 array instead + # of True/False array as in numpy + assert (not torch.is_tensor(label_mask)) + assert (not torch.is_tensor(pred_mask)) + TP, FP, FN = 0, 0, 0 + if label_mask is None: + label_mask = np.ones((labels.shape[0], labels.shape[1])) + if pred_mask is None: pred_mask = np.ones((pred.shape[0], pred.shape[1])) + for batch_idx in range(labels.shape[0]): + TP_i, FP_i, FN_i = single_scene_precision_recall( + labels[batch_idx, label_mask[batch_idx, :] == 1, :], + pred[batch_idx, + pred_mask[batch_idx, :] == 1, :], iou_thresh, conf_thresh) + TP += TP_i + FP += FP_i + FN += FN_i + + return TP, FP, FN, precision_recall(TP, FP, FN) + + +def single_scene_precision_recall(labels, pred, iou_thresh, conf_thresh): + """Compute P and R for predicted bounding boxes. + + Ignores classes! + Args: + labels: (N x bbox) ground-truth bounding boxes (6 dims) + pred: (M x (bbox + conf)) predicted bboxes with confidence and maybe classification + Returns: + TP, FP, FN + """ + + # for each pred box with high conf (C), compute IoU with all gt boxes. + # TP = number of times IoU > th ; FP = C - TP + # FN - number of scene objects without good match + + gt_bboxes = labels[:, :6] + + num_scene_bboxes = gt_bboxes.shape[0] + conf = pred[:, 6] + + conf_pred_bbox = pred[np.where(conf > conf_thresh)[0], :6] + num_conf_pred_bboxes = conf_pred_bbox.shape[0] + + # init an array to keep iou between generated and scene bboxes + iou_arr = np.zeros([num_conf_pred_bboxes, num_scene_bboxes]) + for g_idx in range(num_conf_pred_bboxes): + for s_idx in range(num_scene_bboxes): + iou_arr[g_idx, s_idx] = calc_iou(conf_pred_bbox[g_idx, :], + gt_bboxes[s_idx, :]) + + good_match_arr = (iou_arr >= iou_thresh) + + TP = good_match_arr.any(axis=1).sum() + FP = num_conf_pred_bboxes - TP + FN = num_scene_bboxes - good_match_arr.any(axis=0).sum() + + return TP, FP, FN + + +def precision_recall(TP, FP, FN): + Prec = 1.0 * TP / (TP + FP) if TP + FP > 0 else 0 + Rec = 1.0 * TP / (TP + FN) + return Prec, Rec + + +def calc_iou(box_a, box_b): + """Computes IoU of two axis aligned bboxes. 
+
+    Args:
+        box_a, box_b: 6D of center and lengths
+    Returns:
+        iou
+    """
+
+    max_a = box_a[0:3] + box_a[3:6] / 2
+    max_b = box_b[0:3] + box_b[3:6] / 2
+    min_max = np.array([max_a, max_b]).min(0)
+
+    min_a = box_a[0:3] - box_a[3:6] / 2
+    min_b = box_b[0:3] - box_b[3:6] / 2
+    max_min = np.array([min_a, min_b]).max(0)
+    if not ((min_max > max_min).all()):
+        return 0.0
+
+    intersection = (min_max - max_min).prod()
+    vol_a = box_a[3:6].prod()
+    vol_b = box_b[3:6].prod()
+    union = vol_a + vol_b - intersection
+    return 1.0 * intersection / union
+
+
+if __name__ == '__main__':
+    print('running some tests')
+
+    ############
+    ## Test IoU
+    ############
+    box_a = np.array([0, 0, 0, 1, 1, 1])
+    box_b = np.array([0, 0, 0, 2, 2, 2])
+    expected_iou = 1.0 / 8
+    pred_iou = calc_iou(box_a, box_b)
+    assert expected_iou == pred_iou, 'function returned wrong IoU'
+
+    box_a = np.array([0, 0, 0, 1, 1, 1])
+    box_b = np.array([10, 10, 10, 2, 2, 2])
+    expected_iou = 0.0
+    pred_iou = calc_iou(box_a, box_b)
+    assert expected_iou == pred_iou, 'function returned wrong IoU'
+
+    print('IoU test -- PASSED')
+
+    #########################
+    ## Test Precision Recall
+    #########################
+    gt_boxes = np.array([[0, 0, 0, 1, 1, 1], [3, 0, 1, 1, 10, 1]])
+    detected_boxes = np.array([[0, 0, 0, 1, 1, 1, 1.0],
+                               [3, 0, 1, 1, 10, 1, 0.9]])
+    TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5,
+                                               0.5)
+    assert TP == 2 and FP == 0 and FN == 0
+    assert precision_recall(TP, FP, FN) == (1, 1)
+
+    detected_boxes = np.array([[0, 0, 0, 1, 1, 1, 1.0]])
+    TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5,
+                                               0.5)
+    assert TP == 1 and FP == 0 and FN == 1
+    assert precision_recall(TP, FP, FN) == (1, 0.5)
+
+    detected_boxes = np.array([[0, 0, 0, 1, 1, 1, 1.0],
+                               [-1, -1, 0, 0.1, 0.1, 1, 1.0]])
+    TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5,
+                                               0.5)
+    assert TP == 1 and FP == 1 and FN == 1
+    assert precision_recall(TP, FP, FN) == (0.5, 0.5)
+
+    # wrong box has low confidence
+    detected_boxes = np.array([[0, 0, 0, 1, 1, 1, 1.0],
+                               [-1, -1, 0, 0.1, 0.1, 1, 0.1]])
+    TP, FP, FN = single_scene_precision_recall(gt_boxes, detected_boxes, 0.5,
+                                               0.5)
+    assert TP == 1 and FP == 0 and FN == 1
+    assert precision_recall(TP, FP, FN) == (1, 0.5)
+
+    print('Precision Recall test -- PASSED')
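The NMS helpers in the next file all follow the same pattern: sort by score, keep the highest-scoring box, and delete every remaining box whose overlap with it exceeds the threshold. A small usage sketch for nms_3d_faster, with made-up (x1,y1,z1,x2,y2,z2,score) rows and again assuming utils.nms is importable from the models/Scanrefer root:

import numpy as np
from utils.nms import nms_3d_faster

boxes = np.array([
    [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.9],  # kept: highest score
    [0.1, 0.1, 0.1, 1.1, 1.1, 1.1, 0.8],  # IoU ~0.57 with the first -> suppressed
    [5.0, 5.0, 5.0, 6.0, 6.0, 6.0, 0.7],  # disjoint -> kept
])
print(nms_3d_faster(boxes, overlap_threshold=0.25))  # [0, 2]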
diff --git a/models/Scanrefer/utils/nms.py b/models/Scanrefer/utils/nms.py
new file mode 100644
index 0000000..aab95ea
--- /dev/null
+++ b/models/Scanrefer/utils/nms.py
@@ -0,0 +1,189 @@
+import numpy as np
+from utils.pc_utils import bbox_corner_dist_measure
+
+# boxes are axis-aligned 2D boxes of shape (n,5) in FLOAT numbers with (x1,y1,x2,y2,score)
+''' Ref: https://www.pyimagesearch.com/2015/02/16/faster-non-maximum-suppression-python/
+Ref: https://github.com/vickyboy47/nms-python/blob/master/nms.py
+'''
+
+
+def nms_2d(boxes, overlap_threshold):
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    x2 = boxes[:, 2]
+    y2 = boxes[:, 3]
+    score = boxes[:, 4]
+    area = (x2 - x1) * (y2 - y1)
+
+    I = np.argsort(score)
+    pick = []
+    while (I.size != 0):
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+        suppress = [last - 1]
+        for pos in range(last - 1):
+            j = I[pos]
+            xx1 = max(x1[i], x1[j])
+            yy1 = max(y1[i], y1[j])
+            xx2 = min(x2[i], x2[j])
+            yy2 = min(y2[i], y2[j])
+            w = xx2 - xx1
+            h = yy2 - yy1
+            if (w > 0 and h > 0):
+                o = w * h / area[j]
+                print('Overlap is', o)
+                if (o > overlap_threshold):
+                    suppress.append(pos)
+        I = np.delete(I, suppress)
+    return pick
+
+
+def nms_2d_faster(boxes, overlap_threshold, old_type=False):
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    x2 = boxes[:, 2]
+    y2 = boxes[:, 3]
+    score = boxes[:, 4]
+    area = (x2 - x1) * (y2 - y1)
+
+    I = np.argsort(score)
+    pick = []
+    while (I.size != 0):
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+
+        xx1 = np.maximum(x1[i], x1[I[:last - 1]])
+        yy1 = np.maximum(y1[i], y1[I[:last - 1]])
+        xx2 = np.minimum(x2[i], x2[I[:last - 1]])
+        yy2 = np.minimum(y2[i], y2[I[:last - 1]])
+
+        w = np.maximum(0, xx2 - xx1)
+        h = np.maximum(0, yy2 - yy1)
+
+        if old_type:
+            o = (w * h) / area[I[:last - 1]]
+        else:
+            inter = w * h
+            o = inter / (area[i] + area[I[:last - 1]] - inter)
+
+        I = np.delete(
+            I, np.concatenate(
+                ([last - 1], np.where(o > overlap_threshold)[0])))
+
+    return pick
+
+
+def nms_3d_faster(boxes, overlap_threshold, old_type=False):
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    z1 = boxes[:, 2]
+    x2 = boxes[:, 3]
+    y2 = boxes[:, 4]
+    z2 = boxes[:, 5]
+    score = boxes[:, 6]
+    area = (x2 - x1) * (y2 - y1) * (z2 - z1)
+
+    I = np.argsort(score)
+    pick = []
+    while (I.size != 0):
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+
+        xx1 = np.maximum(x1[i], x1[I[:last - 1]])
+        yy1 = np.maximum(y1[i], y1[I[:last - 1]])
+        zz1 = np.maximum(z1[i], z1[I[:last - 1]])
+        xx2 = np.minimum(x2[i], x2[I[:last - 1]])
+        yy2 = np.minimum(y2[i], y2[I[:last - 1]])
+        zz2 = np.minimum(z2[i], z2[I[:last - 1]])
+
+        l = np.maximum(0, xx2 - xx1)
+        w = np.maximum(0, yy2 - yy1)
+        h = np.maximum(0, zz2 - zz1)
+
+        if old_type:
+            o = (l * w * h) / area[I[:last - 1]]
+        else:
+            inter = l * w * h
+            o = inter / (area[i] + area[I[:last - 1]] - inter)
+
+        I = np.delete(
+            I, np.concatenate(
+                ([last - 1], np.where(o > overlap_threshold)[0])))
+
+    return pick
+
+
+def nms_3d_faster_samecls(boxes, overlap_threshold, old_type=False):
+    x1 = boxes[:, 0]
+    y1 = boxes[:, 1]
+    z1 = boxes[:, 2]
+    x2 = boxes[:, 3]
+    y2 = boxes[:, 4]
+    z2 = boxes[:, 5]
+    score = boxes[:, 6]
+    cls = boxes[:, 7]
+    area = (x2 - x1) * (y2 - y1) * (z2 - z1)
+
+    I = np.argsort(score)
+    pick = []
+    while (I.size != 0):
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+
+        xx1 = np.maximum(x1[i], x1[I[:last - 1]])
+        yy1 = np.maximum(y1[i], y1[I[:last - 1]])
+        zz1 = np.maximum(z1[i], z1[I[:last - 1]])
+        xx2 = np.minimum(x2[i], x2[I[:last - 1]])
+        yy2 = np.minimum(y2[i], y2[I[:last - 1]])
+        zz2 = np.minimum(z2[i], z2[I[:last - 1]])
+        cls1 = cls[i]
+        cls2 = cls[I[:last - 1]]
+
+        l = np.maximum(0, xx2 - xx1)
+        w = np.maximum(0, yy2 - yy1)
+        h = np.maximum(0, zz2 - zz1)
+
+        if old_type:
+            o = (l * w * h) / area[I[:last - 1]]
+        else:
+            inter = l * w * h
+            o = inter / (area[i] + area[I[:last - 1]] - inter)
+            o = o * (cls1 == cls2)
+
+        I = np.delete(
+            I, np.concatenate(
+                ([last - 1], np.where(o > overlap_threshold)[0])))
+
+    return pick
+
+
+def nms_crnr_dist(boxes, conf, overlap_threshold):
+
+    I = np.argsort(conf)
+    pick = []
+    while (I.size != 0):
+        last = I.size
+        i = I[-1]
+        pick.append(i)
+
+        scores = []
+        for ind in I[:-1]:
+            scores.append(bbox_corner_dist_measure(boxes[i, :], boxes[ind, :]))
+
+        I = np.delete(
+            I,
+            np.concatenate(
+                ([last - 1],
+                 np.where(np.array(scores) > overlap_threshold)[0])))
+
+    return pick
+
+
+if __name__ == '__main__':
+    a = np.random.random((100, 5))
+    print(nms_2d(a, 0.9))
+    print(nms_2d_faster(a, 0.9))
diff --git a/models/Scanrefer/utils/nn_distance.py b/models/Scanrefer/utils/nn_distance.py
new file mode 100644
index 0000000..3ea4550
--- /dev/null
+++ b/models/Scanrefer/utils/nn_distance.py
@@ -0,0 +1,94 @@
+"""
+Chamfer distance in Pytorch.
+Author: Charles R.
Qi + +From: https://github.com/facebookresearch/votenet/blob/master/utils/nn_distance.py +""" + +import numpy as np +import torch +import torch.nn as nn + + +def huber_loss(error, delta=1.0): + """ + Args: + error: Torch tensor (d1,d2,...,dk) + Returns: + loss: Torch tensor (d1,d2,...,dk) + + x = error = pred - gt or dist(pred,gt) + 0.5 * |x|^2 if |x|<=d + 0.5 * d^2 + d * (|x|-d) if |x|>d + Ref: https://github.com/charlesq34/frustum-pointnets/blob/master/models/model_util.py + """ + abs_error = torch.abs(error) + #quadratic = torch.min(abs_error, torch.FloatTensor([delta])) + quadratic = torch.clamp(abs_error, max=delta) + linear = (abs_error - quadratic) + loss = 0.5 * quadratic**2 + delta * linear + return loss + + +def nn_distance(pc1, pc2, l1smooth=False, delta=1.0, l1=False): + """ + Input: + pc1: (B,N,C) torch tensor + pc2: (B,M,C) torch tensor + l1smooth: bool, whether to use l1smooth loss + delta: scalar, the delta used in l1smooth loss + Output: + dist1: (B,N) torch float32 tensor + idx1: (B,N) torch int64 tensor + dist2: (B,M) torch float32 tensor + idx2: (B,M) torch int64 tensor + """ + N = pc1.shape[1] + M = pc2.shape[1] + pc1_expand_tile = pc1.unsqueeze(2).repeat(1, 1, M, 1) + pc2_expand_tile = pc2.unsqueeze(1).repeat(1, N, 1, 1) + pc_diff = pc1_expand_tile - pc2_expand_tile + + if l1smooth: + pc_dist = torch.sum(huber_loss(pc_diff, delta), dim=-1) # (B,N,M) + elif l1: + pc_dist = torch.sum(torch.abs(pc_diff), dim=-1) # (B,N,M) + else: + pc_dist = torch.sum(pc_diff**2, dim=-1) # (B,N,M) + dist1, idx1 = torch.min(pc_dist, dim=2) # (B,N) + dist2, idx2 = torch.min(pc_dist, dim=1) # (B,M) + return dist1, idx1, dist2, idx2 + + +def demo_nn_distance(): + np.random.seed(0) + pc1arr = np.random.random((1, 5, 3)) + pc2arr = np.random.random((1, 6, 3)) + pc1 = torch.from_numpy(pc1arr.astype(np.float32)) + pc2 = torch.from_numpy(pc2arr.astype(np.float32)) + dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2) + print(dist1) + print(idx1) + dist = np.zeros((5, 6)) + for i in range(5): + for j in range(6): + dist[i, j] = np.sum((pc1arr[0, i, :] - pc2arr[0, j, :])**2) + print(dist) + print('-' * 30) + print('L1smooth dists:') + dist1, idx1, dist2, idx2 = nn_distance(pc1, pc2, True) + print(dist1) + print(idx1) + dist = np.zeros((5, 6)) + for i in range(5): + for j in range(6): + error = np.abs(pc1arr[0, i, :] - pc2arr[0, j, :]) + quad = np.minimum(error, 1.0) + linear = error - quad + loss = 0.5 * quad**2 + 1.0 * linear + dist[i, j] = np.sum(loss) + print(dist) + + +if __name__ == '__main__': + demo_nn_distance() diff --git a/models/Scanrefer/utils/pc_utils.py b/models/Scanrefer/utils/pc_utils.py new file mode 100644 index 0000000..ba5c45f --- /dev/null +++ b/models/Scanrefer/utils/pc_utils.py @@ -0,0 +1,574 @@ +""" +Utility functions for processing point clouds. +From: https://github.com/facebookresearch/votenet/blob/master/utils/pc_util.py + +Author: Charles R. 
Qi and Or Litany
+"""
+
+import os
+import sys
+
+BASE_DIR = os.path.dirname(os.path.abspath(__file__))
+sys.path.append(BASE_DIR)
+
+# Point cloud IO
+import numpy as np
+
+try:
+    from plyfile import PlyData, PlyElement
+except ImportError:
+    print("Please install the module 'plyfile' for PLY i/o, e.g.")
+    print('pip install plyfile')
+    sys.exit(-1)
+
+import matplotlib.pyplot as pyplot
+# Mesh IO
+import trimesh
+
+# ----------------------------------------
+# Point Cloud Sampling
+# ----------------------------------------
+
+
+def random_sampling(pc, num_sample, replace=None, return_choices=False):
+    """Input is NxC, output is num_sample x C."""
+    if replace is None:
+        replace = (pc.shape[0] < num_sample)
+    choices = np.random.choice(pc.shape[0], num_sample, replace=replace)
+    if return_choices:
+        return pc[choices], choices
+    else:
+        return pc[choices]
+
+
+# ----------------------------------------
+# Point Cloud/Volume Conversions
+# ----------------------------------------
+
+
+def point_cloud_to_volume_batch(point_clouds,
+                                vsize=12,
+                                radius=1.0,
+                                flatten=True):
+    """Input is BxNx3, a batch of point clouds; output is Bx(vsize^3)."""
+    vol_list = []
+    for b in range(point_clouds.shape[0]):
+        vol = point_cloud_to_volume(np.squeeze(point_clouds[b, :, :]), vsize,
+                                    radius)
+        if flatten:
+            vol_list.append(vol.flatten())
+        else:
+            vol_list.append(np.expand_dims(np.expand_dims(vol, -1), 0))
+    if flatten:
+        return np.vstack(vol_list)
+    else:
+        return np.concatenate(vol_list, 0)
+
+
+def point_cloud_to_volume(points, vsize, radius=1.0):
+    """input is Nx3 points.
+
+    output is vsize*vsize*vsize; assumes points are in range [-radius, radius]
+    """
+    vol = np.zeros((vsize, vsize, vsize))
+    voxel = 2 * radius / float(vsize)
+    locations = (points + radius) / voxel
+    locations = locations.astype(int)
+    vol[locations[:, 0], locations[:, 1], locations[:, 2]] = 1.0
+    return vol
+
+
+def volume_to_point_cloud(vol):
+    """ vol is occupancy grid (value = 0 or 1) of size vsize*vsize*vsize
+    return Nx3 numpy array.
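+
+    Example (illustrative): a (4, 4, 4) grid whose only occupied voxel is
+    vol[1, 2, 3] returns np.array([[1, 2, 3]]).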
+    """
+    vsize = vol.shape[0]
+    assert (vol.shape[1] == vsize and vol.shape[2] == vsize)
+    points = []
+    for a in range(vsize):
+        for b in range(vsize):
+            for c in range(vsize):
+                if vol[a, b, c] == 1:
+                    points.append(np.array([a, b, c]))
+    if len(points) == 0:
+        return np.zeros((0, 3))
+    points = np.vstack(points)
+    return points
+
+
+def point_cloud_to_volume_v2_batch(point_clouds,
+                                   vsize=12,
+                                   radius=1.0,
+                                   num_sample=128):
+    """Input is BxNx3, a batch of point clouds; output is
+    BxVxVxVxnum_samplex3. Added on Feb 19."""
+    vol_list = []
+    for b in range(point_clouds.shape[0]):
+        vol = point_cloud_to_volume_v2(point_clouds[b, :, :], vsize, radius,
+                                       num_sample)
+        vol_list.append(np.expand_dims(vol, 0))
+    return np.concatenate(vol_list, 0)
+
+
+def point_cloud_to_volume_v2(points, vsize, radius=1.0, num_sample=128):
+    """Input is Nx3 points; output is vsize*vsize*vsize*num_sample*3. Assumes
+    points are in range [-radius, radius]. Samples num_sample points in each
+    voxel; if there are fewer than num_sample points, replicates the points.
+    Added on Feb 19."""
+    vol = np.zeros((vsize, vsize, vsize, num_sample, 3))
+    voxel = 2 * radius / float(vsize)
+    locations = (points + radius) / voxel
+    locations = locations.astype(int)
+    loc2pc = {}
+    for n in range(points.shape[0]):
+        loc = tuple(locations[n, :])
+        if loc not in loc2pc:
+            loc2pc[loc] = []
+        loc2pc[loc].append(points[n, :])
+
+    for i in range(vsize):
+        for j in range(vsize):
+            for k in range(vsize):
+                if (i, j, k) not in loc2pc:
+                    vol[i, j, k, :, :] = np.zeros((num_sample, 3))
+                else:
+                    pc = loc2pc[(i, j, k)]  # a list of (3,) arrays
+                    pc = np.vstack(pc)  # kx3
+                    # Sample/pad to num_sample points
+                    if pc.shape[0] > num_sample:
+                        pc = random_sampling(pc, num_sample, False)
+                    elif pc.shape[0] < num_sample:
+                        pc = np.lib.pad(pc, ((0, num_sample - pc.shape[0]),
+                                             (0, 0)), 'edge')
+                    # Normalize
+                    pc_center = (np.array([i, j, k]) + 0.5) * voxel - radius
+                    pc = (pc - pc_center) / voxel  # shift and scale
+                    vol[i, j, k, :, :] = pc
+    return vol
+
+
+def point_cloud_to_image_batch(point_clouds,
+                               imgsize,
+                               radius=1.0,
+                               num_sample=128):
+    """Input is BxNx3, a batch of point clouds; output is
+    BxIxIxnum_samplex3. Added on Feb 19."""
+    img_list = []
+    for b in range(point_clouds.shape[0]):
+        img = point_cloud_to_image(point_clouds[b, :, :], imgsize, radius,
+                                   num_sample)
+        img_list.append(np.expand_dims(img, 0))
+    return np.concatenate(img_list, 0)
+
+
+def point_cloud_to_image(points, imgsize, radius=1.0, num_sample=128):
+    """Input is Nx3 points; output is imgsize*imgsize*num_sample*3. Assumes
+    points are in range [-radius, radius]. Samples num_sample points in each
+    pixel; if there are fewer than num_sample points, replicates the points.
+    Added on Feb 19."""
+    img = np.zeros((imgsize, imgsize, num_sample, 3))
+    pixel = 2 * radius / float(imgsize)
+    locations = (points[:, 0:2] + radius) / pixel  # Nx2
+    locations = locations.astype(int)
+    loc2pc = {}
+    for n in range(points.shape[0]):
+        loc = tuple(locations[n, :])
+        if loc not in loc2pc:
+            loc2pc[loc] = []
+        loc2pc[loc].append(points[n, :])
+    for i in range(imgsize):
+        for j in range(imgsize):
+            if (i, j) not in loc2pc:
+                img[i, j, :, :] = np.zeros((num_sample, 3))
+            else:
+                pc = loc2pc[(i, j)]
+                pc = np.vstack(pc)
+                if pc.shape[0] > num_sample:
+                    pc = random_sampling(pc, num_sample, False)
+                elif pc.shape[0] < num_sample:
+                    pc = np.lib.pad(pc,
+                                    ((0, num_sample - pc.shape[0]), (0, 0)),
+                                    'edge')
+                pc_center = (np.array([i, j]) + 0.5) * pixel - radius
+                pc[:, 0:2] = (pc[:, 0:2] - pc_center) / pixel
+                img[i, j, :, :] = pc
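+    # After the loops, each occupied (i, j) cell holds num_sample points whose
+    # xy coordinates are normalized to the pixel center; empty cells are zeros.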
+    return img
+
+
+# ----------------------------------------
+# Point cloud IO
+# ----------------------------------------
+
+
+def read_ply(filename):
+    """read XYZ point cloud from filename PLY file."""
+    plydata = PlyData.read(filename)
+    pc = plydata['vertex'].data
+    pc_array = np.array([[x, y, z] for x, y, z in pc])
+    return pc_array
+
+
+def write_ply(points, filename, text=True):
+    """ input: Nx3, write points to filename as PLY format. """
+    points = [(points[i, 0], points[i, 1], points[i, 2])
+              for i in range(points.shape[0])]
+    vertex = np.array(points, dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4')])
+    el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
+    PlyData([el], text=text).write(filename)
+
+
+def write_ply_color(points,
+                    labels,
+                    filename,
+                    num_classes=None,
+                    colormap=pyplot.cm.jet):
+    """Color (N,3) points with labels (N) within range 0 ~ num_classes-1 and
+    write them as a PLY file."""
+    labels = labels.astype(int)
+    N = points.shape[0]
+    if num_classes is None:
+        num_classes = np.max(labels) + 1
+    else:
+        assert (num_classes > np.max(labels))
+
+    vertex = []
+    #colors = [pyplot.cm.jet(i/float(num_classes)) for i in range(num_classes)]
+    colors = [colormap(i / float(num_classes)) for i in range(num_classes)]
+    for i in range(N):
+        c = colors[labels[i]]
+        c = [int(x * 255) for x in c]
+        vertex.append(
+            (points[i, 0], points[i, 1], points[i, 2], c[0], c[1], c[2]))
+    vertex = np.array(vertex,
+                      dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
+                             ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
+
+    el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
+    PlyData([el], text=True).write(filename)
+
+
+def write_ply_rgb(points, colors, filename, text=True, num_classes=None):
+    """Color (N,3) points with RGB colors (N,3) within range [0,255] and
+    write them as a PLY file."""
+    colors = colors.astype(int)
+    points = [(points[i, 0], points[i, 1], points[i, 2], colors[i, 0],
+               colors[i, 1], colors[i, 2]) for i in range(points.shape[0])]
+    vertex = np.array(points,
+                      dtype=[('x', 'f4'), ('y', 'f4'), ('z', 'f4'),
+                             ('red', 'u1'), ('green', 'u1'), ('blue', 'u1')])
+    el = PlyElement.describe(vertex, 'vertex', comments=['vertices'])
+    PlyData([el], text=text).write(filename)
+
+
+# ----------------------------------------
+# Simple Point cloud and Volume Renderers
+# ----------------------------------------
+
+
+def pyplot_draw_point_cloud(points, output_filename):
+    """points is a Nx3 numpy array."""
+    import matplotlib.pyplot as plt
+    fig = plt.figure()
+    ax = fig.add_subplot(111, projection='3d')
+    ax.scatter(points[:, 0], points[:, 1], points[:, 2])
+    ax.set_xlabel('x')
+    ax.set_ylabel('y')
+    ax.set_zlabel('z')
+    #savefig(output_filename)
+
+
+def pyplot_draw_volume(vol, output_filename):
+    """vol is of size vsize*vsize*vsize output an image to output_filename."""
+    points = volume_to_point_cloud(vol)
+    pyplot_draw_point_cloud(points, output_filename)
+
+
+# ----------------------------------------
+# Simple Point manipulations
+# ----------------------------------------
+def rotate_point_cloud(points, rotation_matrix=None):
+    """ Input: (n,3), Output: (n,3) """
+    # Rotate in-place around Z axis.
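+    # Illustrative usage (pts and pts2 are hypothetical (n, 3) arrays):
+    #   rotated, R = rotate_point_cloud(pts)       # random rotation about Z
+    #   same_rot, _ = rotate_point_cloud(pts2, R)  # reuse R on another cloud
+    # The rotation is applied about the centroid, so the centroid is unchanged.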
+    if rotation_matrix is None:
+        rotation_angle = np.random.uniform() * 2 * np.pi
+        sinval, cosval = np.sin(rotation_angle), np.cos(rotation_angle)
+        rotation_matrix = np.array([[cosval, sinval, 0], [-sinval, cosval, 0],
+                                    [0, 0, 1]])
+    ctr = points.mean(axis=0)
+    rotated_data = np.dot(points - ctr, rotation_matrix) + ctr
+    return rotated_data, rotation_matrix
+
+
+def rotate_pc_along_y(pc, rot_angle):
+    """Input pc is NxC points with first 3 channels as XYZ; z is facing
+    forward, x is leftward, y is downward."""
+    cosval = np.cos(rot_angle)
+    sinval = np.sin(rot_angle)
+    rotmat = np.array([[cosval, -sinval], [sinval, cosval]])
+    pc[:, [0, 2]] = np.dot(pc[:, [0, 2]], np.transpose(rotmat))
+    return pc
+
+
+def rotx(t):
+    """Rotation about the x-axis."""
+    c = np.cos(t)
+    s = np.sin(t)
+    return np.array([[1, 0, 0], [0, c, -s], [0, s, c]])
+
+
+def roty(t):
+    """Rotation about the y-axis."""
+    c = np.cos(t)
+    s = np.sin(t)
+    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
+
+
+def roty_batch(t):
+    """Rotation about the y-axis.
+
+    t: (x1,x2,...xn)
+    return: (x1,x2,...,xn,3,3)
+    """
+    input_shape = t.shape
+    output = np.zeros(tuple(list(input_shape) + [3, 3]))
+    c = np.cos(t)
+    s = np.sin(t)
+    output[..., 0, 0] = c
+    output[..., 0, 2] = s
+    output[..., 1, 1] = 1
+    output[..., 2, 0] = -s
+    output[..., 2, 2] = c
+    return output
+
+
+def rotz(t):
+    """Rotation about the z-axis."""
+    c = np.cos(t)
+    s = np.sin(t)
+    return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])
+
+
+# ----------------------------------------
+# BBox
+# ----------------------------------------
+def bbox_corner_dist_measure(crnr1, crnr2):
+    """ compute distance between box corners to replace iou
+    Args:
+        crnr1, crnr2: 8x3 points of box corners in camera axis (y points down)
+    Returns:
+        a scalar between 0 and 1
+    """
+
+    dist = sys.maxsize
+    for y in range(4):
+        rows = ([(x + y) % 4
+                 for x in range(4)] + [4 + (x + y) % 4 for x in range(4)])
+        d_ = np.linalg.norm(crnr2[rows, :] - crnr1, axis=1).sum() / 8.0
+        if d_ < dist:
+            dist = d_
+
+    u = sum([np.linalg.norm(x[0, :] - x[6, :]) for x in [crnr1, crnr2]]) / 2.0
+
+    measure = max(1.0 - dist / u, 0)
+    print(measure)
+
+    return measure
+
+
+def point_cloud_to_bbox(points):
+    """ Extract the axis-aligned box from a pcl or batch of pcls
+    Args:
+        points: Nx3 points or BxNx3
+    Returns:
+        6 dim: xyz pos of center and 3 lengths
+    """
+    which_dim = len(
+        points.shape) - 2  # first dim if a single cloud and second if batch
+    mn, mx = points.min(which_dim), points.max(which_dim)
+    lengths = mx - mn
+    cntr = 0.5 * (mn + mx)
+    return np.concatenate([cntr, lengths], axis=which_dim)
+
+
+def write_bbox(scene_bbox, out_filename):
+    """Export scene bbox to meshes
+    Args:
+        scene_bbox: (N x 6 numpy array): xyz pos of center and 3 lengths
+        out_filename: (string) filename
+
+    Note:
+        To visualize the boxes in MeshLab.
+        1. Select the objects (the boxes)
+        2. Filters -> Polygon and Quad Mesh -> Turn into Quad-Dominant Mesh
+        3. Select Wireframe view.
+    """
+
+    def convert_box_to_trimesh_fmt(box):
+        ctr = box[:3]
+        lengths = box[3:]
+        trns = np.eye(4)
+        trns[0:3, 3] = ctr
+        trns[3, 3] = 1.0
+        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
+        return box_trimesh_fmt
+
+    scene = trimesh.scene.Scene()
+    for box in scene_bbox:
+        scene.add_geometry(convert_box_to_trimesh_fmt(box))
+
+    mesh_list = trimesh.util.concatenate(scene.dump())
+    # save to ply file
+    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
+
+    return
+
+
+def write_oriented_bbox(scene_bbox, out_filename):
+    """Export oriented (around Z axis) scene bbox to meshes
+    Args:
+        scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
+            and heading angle around Z axis.
+            Y forward, X right, Z upward. heading angle of positive X is 0,
+            heading angle of positive Y is 90 degrees.
+        out_filename: (string) filename
+    """

+    def heading2rotmat(heading_angle):
+        rotmat = np.zeros((3, 3))
+        rotmat[2, 2] = 1
+        cosval = np.cos(heading_angle)
+        sinval = np.sin(heading_angle)
+        rotmat[0:2, 0:2] = np.array([[cosval, -sinval], [sinval, cosval]])
+        return rotmat
+
+    def convert_oriented_box_to_trimesh_fmt(box):
+        ctr = box[:3]
+        lengths = box[3:6]
+        trns = np.eye(4)
+        trns[0:3, 3] = ctr
+        trns[3, 3] = 1.0
+        trns[0:3, 0:3] = heading2rotmat(box[6])
+        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
+        return box_trimesh_fmt
+
+    scene = trimesh.scene.Scene()
+    for box in scene_bbox:
+        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
+
+    mesh_list = trimesh.util.concatenate(scene.dump())
+    # save to ply file
+    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
+
+    return
+
+
+def write_oriented_bbox_camera_coord(scene_bbox, out_filename):
+    """Export oriented (around Y axis) scene bbox to meshes
+    Args:
+        scene_bbox: (N x 7 numpy array): xyz pos of center and 3 lengths (dx,dy,dz)
+            and heading angle around Y axis.
+            Z forward, X rightward, Y downward. heading angle of positive X is 0,
+            heading angle of negative Z is 90 degrees.
+        out_filename: (string) filename
+    """

+    def heading2rotmat(heading_angle):
+        rotmat = np.zeros((3, 3))
+        rotmat[1, 1] = 1
+        cosval = np.cos(heading_angle)
+        sinval = np.sin(heading_angle)
+        rotmat[0, :] = np.array([cosval, 0, sinval])
+        rotmat[2, :] = np.array([-sinval, 0, cosval])
+        return rotmat
+
+    def convert_oriented_box_to_trimesh_fmt(box):
+        ctr = box[:3]
+        lengths = box[3:6]
+        trns = np.eye(4)
+        trns[0:3, 3] = ctr
+        trns[3, 3] = 1.0
+        trns[0:3, 0:3] = heading2rotmat(box[6])
+        box_trimesh_fmt = trimesh.creation.box(lengths, trns)
+        return box_trimesh_fmt
+
+    scene = trimesh.scene.Scene()
+    for box in scene_bbox:
+        scene.add_geometry(convert_oriented_box_to_trimesh_fmt(box))
+
+    mesh_list = trimesh.util.concatenate(scene.dump())
+    # save to ply file
+    trimesh.io.export.export_mesh(mesh_list, out_filename, file_type='ply')
+
+    return
+
+
+def write_lines_as_cylinders(pcl, filename, rad=0.005, res=64):
+    """Create lines represented as cylinders connecting pairs of 3D points
+    Args:
+        pcl: (N x 2 x 3 numpy array): N pairs of xyz pos
+        filename: (string) filename for the output mesh (ply) file
+        rad: radius for the cylinder
+        res: number of sections used to create the cylinder
+    """
+    scene = trimesh.scene.Scene()
+    for src, tgt in pcl:
+        # compute line
+        vec = tgt - src
+        M = trimesh.geometry.align_vectors([0, 0, 1], vec, False)
+        vec = tgt - src  # compute again since align_vectors modifies vec in-place!
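+        # align_vectors yields a 4x4 homogeneous transform rotating +Z onto
+        # vec; the next line sets its translation to the segment midpoint so
+        # the cylinder is centered on the line segment.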
+ M[:3, 3] = 0.5 * src + 0.5 * tgt + height = np.sqrt(np.dot(vec, vec)) + scene.add_geometry( + trimesh.creation.cylinder(radius=rad, + height=height, + sections=res, + transform=M)) + mesh_list = trimesh.util.concatenate(scene.dump()) + trimesh.io.export.export_mesh(mesh_list, + f'{filename}.ply', + file_type='ply') + + +# ---------------------------------------- +# Testing +# ---------------------------------------- +if __name__ == '__main__': + print('running some tests') + + ############ + ## Test "write_lines_as_cylinders" + ############ + pcl = np.random.rand(32, 2, 3) + write_lines_as_cylinders(pcl, 'point_connectors') + input() + + scene_bbox = np.zeros((1, 7)) + scene_bbox[0, 3:6] = np.array([1, 2, 3]) # dx,dy,dz + scene_bbox[0, 6] = np.pi / 4 # 45 degrees + write_oriented_bbox(scene_bbox, 'single_obb_45degree.ply') + ############ + ## Test point_cloud_to_bbox + ############ + pcl = np.random.rand(32, 16, 3) + pcl_bbox = point_cloud_to_bbox(pcl) + assert pcl_bbox.shape == (32, 6) + + pcl = np.random.rand(16, 3) + pcl_bbox = point_cloud_to_bbox(pcl) + assert pcl_bbox.shape == (6, ) + + ############ + ## Test corner distance + ############ + crnr1 = np.array([[2.59038660e+00, 8.96107932e-01, 4.73305349e+00], + [4.12281644e-01, 8.96107932e-01, 4.48046631e+00], + [2.97129656e-01, 8.96107932e-01, 5.47344275e+00], + [2.47523462e+00, 8.96107932e-01, 5.72602993e+00], + [2.59038660e+00, 4.41155793e-03, 4.73305349e+00], + [4.12281644e-01, 4.41155793e-03, 4.48046631e+00], + [2.97129656e-01, 4.41155793e-03, 5.47344275e+00], + [2.47523462e+00, 4.41155793e-03, 5.72602993e+00]]) + crnr2 = crnr1 + + print(bbox_corner_dist_measure(crnr1, crnr2)) + + print('tests PASSED') diff --git a/requirements/QA.txt b/requirements/QA.txt new file mode 100644 index 0000000..798eca2 --- /dev/null +++ b/requirements/QA.txt @@ -0,0 +1,5 @@ +openai +pycocoevalcap +sentence-transformers +simcse +transformers diff --git a/requirements/VG.txt b/requirements/VG.txt new file mode 100644 index 0000000..652a436 --- /dev/null +++ b/requirements/VG.txt @@ -0,0 +1 @@ +terminaltables diff --git a/requirements/base.txt b/requirements/base.txt index 9283454..34d0768 100644 --- a/requirements/base.txt +++ b/requirements/base.txt @@ -1,5 +1,4 @@ -mmengine -numpy==1.23.5 -opencv-python +numpy +plyfile +pytorch3d torch -tqdm diff --git a/requirements/run.txt b/requirements/run.txt deleted file mode 100644 index f21b40a..0000000 --- a/requirements/run.txt +++ /dev/null @@ -1,7 +0,0 @@ -MinkowskiEngine @ git+https://github.com/NVIDIA/MinkowskiEngine.git -mmcv==2.0.0rc4 -mmdet -mmengine -ninja -pytorch3d -transformers diff --git a/setup.py b/setup.py index e5727ba..c2c2a10 100644 --- a/setup.py +++ b/setup.py @@ -1,110 +1,23 @@ from setuptools import find_packages, setup - -def parse_requirements(fname='requirements.txt', with_version=True): - """Parse the package dependencies listed in a requirements file but strips - specific versioning information. 
- - Args: - fname (str): path to requirements file - with_version (bool, default=False): if True include version specs - - Returns: - list[str]: list of requirements items - - CommandLine: - python -c "import setup; print(setup.parse_requirements())" - """ - import re - import sys - from os.path import exists - require_fpath = fname - - def parse_line(line): - """Parse information from a line in a requirements text file.""" - if line.startswith('-r '): - # Allow specifying requirements in other files - target = line.split(' ')[1] - for info in parse_require_file(target): - yield info - else: - info = {'line': line} - if line.startswith('-e '): - info['package'] = line.split('#egg=')[1] - else: - # Remove versioning from the package - pat = '(' + '|'.join(['>=', '==', '>']) + ')' - parts = re.split(pat, line, maxsplit=1) - parts = [p.strip() for p in parts] - - info['package'] = parts[0] - if len(parts) > 1: - op, rest = parts[1:] - if ';' in rest: - # Handle platform specific dependencies - # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies - version, platform_deps = map(str.strip, - rest.split(';')) - info['platform_deps'] = platform_deps - else: - version = rest # NOQA - info['version'] = (op, version) - yield info - - def parse_require_file(fpath): - with open(fpath, 'r') as f: - for line in f.readlines(): - line = line.strip() - if line and not line.startswith('#'): - for info in parse_line(line): - yield info - - def gen_packages_items(): - if exists(require_fpath): - for info in parse_require_file(require_fpath): - parts = [info['package']] - if with_version and 'version' in info: - parts.extend(info['version']) - if not sys.version.startswith('3.4'): - # apparently package_deps are broken in 3.4 - platform_deps = info.get('platform_deps') - if platform_deps is not None: - parts.append(';' + platform_deps) - item = ''.join(parts) - yield item - - packages = list(gen_packages_items()) - return packages - - if __name__ == '__main__': + my_packages = find_packages() + setup( - name='embodiedscan', - version='0.1', - description='EmbodiedScan', - # long_description=readme(), - # long_description_content_type='text/markdown', - author='author', # TODO - author_email='email', # TODO - keywords='computer vision, 3D object detection', - url='https://github.com/open-mmlab/mmdetection3d', - packages=find_packages(exclude=('configs', 'tools', 'demo')), - include_package_data=True, + name='mmscan', + version='0.0.0', + author='linjingli', + author_email='rbler1234@sjtu.edu.cn', + description='MMScan tools for data loading and evaluation', + long_description=open('README.md').read(), + long_description_content_type='text/markdown', + url='https://github.com/yourusername/your_library', + packages=my_packages, classifiers=[ - 'Development Status :: 3 - Alpha', - 'License :: OSI Approved :: Apache Software License', - 'Operating System :: OS Independent', 'Programming Language :: Python :: 3', - 'Programming Language :: Python :: 3.7', - 'Programming Language :: Python :: 3.8', - 'Programming Language :: Python :: 3.9', + 'License :: OSI Approved :: MIT License', + 'Operating System :: OS Independent', ], - python_requires='>=3.7', - license='Apache License 2.0', - install_requires=parse_requirements('requirements/base.txt'), - extras_require={ - 'visual': parse_requirements('requirements/visual.txt'), - }, - ext_modules=[], - # cmdclass={'build_ext': BuildExtension}, - zip_safe=False) + python_requires='>=3.6', + install_requires=[], + )
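As a quick sanity check for the rewritten packaging, the sketch below assumes the patch has been applied and `pip install -e .` has been run at the repo root:

    # Illustrative smoke test for the new mmscan packaging.
    import mmscan  # package name declared in the rewritten setup.py

    print(mmscan.__name__)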